[PATCH v2 4/9] function wrappers for upcoming socket

From: Glauber Costa
Date: Wed Sep 07 2011 - 00:27:24 EST


Instead of dealing with global values for memory pressure scenarios,
per-cgroup values will be needed. This patch just introduces the
accessor functions that later patches will use.
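
For reference, the wrappers here dereference per-cgroup hooks in struct proto
(memory_pressure, prot_mem, memory_allocated, sockets_allocated). Those
declarations are not part of this patch; the sketch below only infers their
shape from the way the accessors call them, so the exact form elsewhere in
the series may differ:

	/* Inferred shape of the struct proto hooks used below; a sketch only. */
	int		      *(*memory_pressure)(struct kmem_cgroup *cg);
	long		      *(*prot_mem)(struct kmem_cgroup *cg);
	atomic_long_t	      *(*memory_allocated)(struct kmem_cgroup *cg);
	struct percpu_counter *(*sockets_allocated)(struct kmem_cgroup *cg);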

Signed-off-by: Glauber Costa <glommer@xxxxxxxxxxxxx>
CC: David S. Miller <davem@xxxxxxxxxxxxx>
CC: Hiroyuki Kamezawa <kamezawa.hiroyu@xxxxxxxxxxxxxx>
CC: Eric W. Biederman <ebiederm@xxxxxxxxxxxx>
---
include/linux/kmem_cgroup.h | 104 +++++++++++++++++++++++++++++++++++++++++++
1 files changed, 104 insertions(+), 0 deletions(-)

diff --git a/include/linux/kmem_cgroup.h b/include/linux/kmem_cgroup.h
index 77076d8..d983ba8 100644
--- a/include/linux/kmem_cgroup.h
+++ b/include/linux/kmem_cgroup.h
@@ -52,6 +52,110 @@ static inline struct kmem_cgroup *kcg_from_task(struct task_struct *tsk)
 
 #ifdef CONFIG_INET
 #include <net/sock.h>
+static inline int *sk_memory_pressure(struct sock *sk)
+{
+	int *ret = NULL;
+	if (sk->sk_prot->memory_pressure)
+		ret = sk->sk_prot->memory_pressure(sk->sk_cgrp);
+	return ret;
+}
+
+static inline long sk_prot_mem(struct sock *sk, int index)
+{
+	long *prot = sk->sk_prot->prot_mem(sk->sk_cgrp);
+	return prot[index];
+}
+
+static inline long
+sk_memory_allocated(struct sock *sk)
+{
+	struct proto *prot = sk->sk_prot;
+	struct kmem_cgroup *cg = sk->sk_cgrp;
+
+	return atomic_long_read(prot->memory_allocated(cg));
+}
+
+static inline long
+sk_memory_allocated_add(struct sock *sk, int amt, int *parent_failure)
+{
+	struct proto *prot = sk->sk_prot;
+	struct kmem_cgroup *cg = sk->sk_cgrp;
+	long allocated = atomic_long_add_return(amt, prot->memory_allocated(cg));
+
+#ifdef CONFIG_CGROUP_KMEM
+	for (cg = cg->parent; cg != NULL; cg = cg->parent) {
+		long alloc;
+		/*
+		 * Large nestings are not the common case, and stopping in the
+		 * middle would be complicated enough that we bill it all the
+		 * way through the root and, if needed, unbill everything later.
+		 */
+		alloc = atomic_long_add_return(amt, prot->memory_allocated(cg));
+		*parent_failure |= (alloc > sk_prot_mem(sk, 2));
+	}
+#endif
+	return allocated;
+}
+
+static inline void
+sk_memory_allocated_sub(struct sock *sk, int amt)
+{
+	struct proto *prot = sk->sk_prot;
+	struct kmem_cgroup *cg = sk->sk_cgrp;
+
+	atomic_long_sub(amt, prot->memory_allocated(cg));
+
+#ifdef CONFIG_CGROUP_KMEM
+	for (cg = sk->sk_cgrp->parent; cg != NULL; cg = cg->parent)
+		atomic_long_sub(amt, prot->memory_allocated(cg));
+#endif
+}
+
+static inline void sk_sockets_allocated_dec(struct sock *sk)
+{
+	struct proto *prot = sk->sk_prot;
+	struct kmem_cgroup *cg = sk->sk_cgrp;
+
+	percpu_counter_dec(prot->sockets_allocated(cg));
+#ifdef CONFIG_CGROUP_KMEM
+	for (cg = sk->sk_cgrp->parent; cg; cg = cg->parent)
+		percpu_counter_dec(prot->sockets_allocated(cg));
+#endif
+}
+
+static inline void sk_sockets_allocated_inc(struct sock *sk)
+{
+	struct proto *prot = sk->sk_prot;
+	struct kmem_cgroup *cg = sk->sk_cgrp;
+
+	percpu_counter_inc(prot->sockets_allocated(cg));
+#ifdef CONFIG_CGROUP_KMEM
+	for (cg = sk->sk_cgrp->parent; cg; cg = cg->parent)
+		percpu_counter_inc(prot->sockets_allocated(cg));
+#endif
+}
+
+static inline int
+sk_sockets_allocated_read_positive(struct sock *sk)
+{
+	struct proto *prot = sk->sk_prot;
+	struct kmem_cgroup *cg = sk->sk_cgrp;
+
+	return percpu_counter_sum_positive(prot->sockets_allocated(cg));
+}
+
+static inline int
+kcg_sockets_allocated_sum_positive(struct proto *prot, struct kmem_cgroup *cg)
+{
+	return percpu_counter_sum_positive(prot->sockets_allocated(cg));
+}
+
+static inline long
+kcg_memory_allocated(struct proto *prot, struct kmem_cgroup *cg)
+{
+	return atomic_long_read(prot->memory_allocated(cg));
+}
+
 static inline void sock_update_kmem_cgrp(struct sock *sk)
 {
 #ifdef CONFIG_CGROUP_KMEM
--
1.7.6
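
Illustration only, not part of the patch: later patches in the series
presumably convert the generic socket accounting paths to these wrappers.
The sketch below shows how a caller could combine sk_memory_allocated_add(),
sk_prot_mem() and sk_memory_pressure(). The function name and the exact
charge/unbill policy are assumptions; only the wrapper signatures come from
this patch. Indices 1 and 2 of the prot_mem array are taken to be the
pressure and hard limits, following the tcp_mem convention.

	/* Hypothetical caller: a sketch, not the series' actual conversion. */
	static int example_mem_schedule(struct sock *sk, int amt)
	{
		int parent_failure = 0;
		long allocated;

		/* Bill this cgroup and every ancestor; note any ancestor over its limit. */
		allocated = sk_memory_allocated_add(sk, amt, &parent_failure);

		if (parent_failure || allocated > sk_prot_mem(sk, 2)) {
			/* Over the hard limit somewhere in the hierarchy: unbill. */
			sk_memory_allocated_sub(sk, amt);
			return 0;
		}

		if (allocated > sk_prot_mem(sk, 1)) {
			int *pressure = sk_memory_pressure(sk);

			/* Past the pressure threshold: flag memory pressure, if supported. */
			if (pressure)
				*pressure = 1;
		}

		return 1;
	}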
