[PATCH] memcg: cleanup memcg_check_events()

From: Kirill A. Shutemov
Date: Tue Dec 27 2011 - 13:17:13 EST

mem_cgroup_event_ratelimit() returns a bool, which forces memcg_check_events()
to keep the result for every target in a separate variable and to call
preempt_enable() on two different paths.

Turn the event targets into bit flags and let mem_cgroup_event_ratelimit()
return the flag of the target that fired (or 0). memcg_check_events() can then
collect the fired targets in a single 'flags' mask, leave the preempt-disabled
section exactly once and handle the events afterwards, which removes the
nested if/else and the duplicated preempt_enable(). Since targets[] is now
indexed by the flag value, MEM_CGROUP_NTARGETS is sized to cover the largest
flag.

Signed-off-by: Kirill A. Shutemov <kirill@xxxxxxxxxxxxx>
---
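A minimal userspace sketch of the resulting flow, for illustration only: the
BIT() macro, the event names and the stub ratelimit function below are
simplified stand-ins for the memcg code, not the kernel implementation.

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Illustrative stand-ins for the memcg event targets. */
enum event_target {
	TARGET_THRESH    = BIT(0),
	TARGET_SOFTLIMIT = BIT(1),
	TARGET_NUMAINFO  = BIT(2),
};

/* Stub: pretend the threshold and soft limit events are due. */
static unsigned long event_ratelimit(enum event_target target)
{
	if (target == TARGET_THRESH || target == TARGET_SOFTLIMIT)
		return target;
	return 0;
}

static void check_events(void)
{
	unsigned long flags;

	/* "Critical section": collect every event that fired ... */
	flags = event_ratelimit(TARGET_THRESH);
	if (flags) {
		flags |= event_ratelimit(TARGET_SOFTLIMIT);
		flags |= event_ratelimit(TARGET_NUMAINFO);
	}
	/* ... leave it exactly once (preempt_enable() in the patch) ... */

	/* ... and only then act on the collected flags. */
	if (flags & TARGET_THRESH)
		printf("handle threshold event\n");
	if (flags & TARGET_SOFTLIMIT)
		printf("update soft limit tree\n");
	if (flags & TARGET_NUMAINFO)
		printf("bump numainfo counter\n");
}

int main(void)
{
	check_events();
	return 0;
}

Collecting the flags first keeps the preempt-disabled region limited to the
per-cpu counter updates; the heavier event handlers run only after
preempt_enable().
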
mm/memcontrol.c | 42 ++++++++++++++++++++++++------------------
1 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d643bd6..40c2236 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -108,11 +108,12 @@ enum mem_cgroup_events_index {
* than using jiffies etc. to handle periodic memcg event.
*/
enum mem_cgroup_events_target {
- MEM_CGROUP_TARGET_THRESH,
- MEM_CGROUP_TARGET_SOFTLIMIT,
- MEM_CGROUP_TARGET_NUMAINFO,
- MEM_CGROUP_NTARGETS,
+ MEM_CGROUP_TARGET_THRESH = BIT(0),
+ MEM_CGROUP_TARGET_SOFTLIMIT = BIT(1),
+ MEM_CGROUP_TARGET_NUMAINFO = BIT(2),
};
+#define MEM_CGROUP_NTARGETS (MEM_CGROUP_TARGET_NUMAINFO + 1) /* indexed by flag */
+
#define THRESHOLDS_EVENTS_TARGET (128)
#define SOFTLIMIT_EVENTS_TARGET (1024)
#define NUMAINFO_EVENTS_TARGET (1024)
@@ -743,7 +744,7 @@ static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
return total;
}

-static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
+static int mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
enum mem_cgroup_events_target target)
{
unsigned long val, next;
@@ -766,9 +767,9 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
break;
}
__this_cpu_write(memcg->stat->targets[target], next);
- return true;
+ return target;
}
- return false;
+ return 0;
}

/*
@@ -777,29 +778,34 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
*/
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
+ int flags;
+
preempt_disable();
- /* threshold event is triggered in finer grain than soft limit */
- if (unlikely(mem_cgroup_event_ratelimit(memcg,
- MEM_CGROUP_TARGET_THRESH))) {
- bool do_softlimit, do_numainfo;
+ flags = mem_cgroup_event_ratelimit(memcg, MEM_CGROUP_TARGET_THRESH);

- do_softlimit = mem_cgroup_event_ratelimit(memcg,
+ /*
+ * Threshold events are triggered at a finer grain than the soft
+ * limit and numainfo events.
+ */
+ if (unlikely(flags)) {
+ flags |= mem_cgroup_event_ratelimit(memcg,
MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
- do_numainfo = mem_cgroup_event_ratelimit(memcg,
+ flags |= mem_cgroup_event_ratelimit(memcg,
MEM_CGROUP_TARGET_NUMAINFO);
#endif
- preempt_enable();
+ }
+ preempt_enable();

+ if (unlikely(flags)) {
mem_cgroup_threshold(memcg);
- if (unlikely(do_softlimit))
+ if (unlikely(flags & MEM_CGROUP_TARGET_SOFTLIMIT))
mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
- if (unlikely(do_numainfo))
+ if (unlikely(flags & MEM_CGROUP_TARGET_NUMAINFO))
atomic_inc(&memcg->numainfo_events);
#endif
- } else
- preempt_enable();
+ }
}

struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
--
1.7.7.3

--
Kirill A. Shutemov