Re: [PATCH -rt] memcg: use migrate_disable()/migrate_enable() in memcg_check_events()
From: Thomas Gleixner
Date: Wed Nov 16 2011 - 12:02:45 EST
On Wed, 16 Nov 2011, Steven Rostedt wrote:
> On Wed, 2011-11-16 at 17:16 +0800, Yong Zhang wrote:
> > Looking at commit 4799401f [memcg: Fix race condition in
> > memcg_check_events() with this_cpu usage], we just want
> > to disable migration. So use the right API in -rt. This
> > will cure the warning below.
> No, this won't work, not even for -rt. If we disable migration but not
> preemption, then two tasks can take this path on the same CPU, and the
> counters that __memcg_event_check() tests will be corrupted, because
> nothing protects the per-cpu updates from two tasks running the same
> path concurrently.
>
> Perhaps a local_lock would work.
Yes, that's the only sensible option for now. Untested patch below.
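
To make the race Steven describes concrete: __this_cpu_add() is a
non-atomic read-modify-write, and migrate_disable() pins the task to a
CPU without disabling preemption. Hand-expanded sketch (illustration
only, not how the code is actually written):

	unsigned long val;

	migrate_disable();
	/* __this_cpu_add(memcg->stat->events[i], nr_pages) boils down to: */
	val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
	/*
	 * Task A is preempted here. Task B runs the same path on the
	 * same CPU, reads the same old value and stores its update.
	 */
	__this_cpu_write(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT],
			 val + nr_pages);
	/* Task A resumes and overwrites B's store: one update is lost. */
	migrate_enable();

local_lock() serializes the two tasks on that CPU, which closes the
window.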
Thanks,
tglx
-------------->
mm/memcontrol.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
Index: linux-3.2/mm/memcontrol.c
===================================================================
--- linux-3.2.orig/mm/memcontrol.c
+++ linux-3.2/mm/memcontrol.c
@@ -49,6 +49,8 @@
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
+#include <linux/locallock.h>
+
#include "internal.h"
#include <asm/uaccess.h>
@@ -363,6 +365,8 @@ enum charge_type {
#define MEM_CGROUP_RECLAIM_SOFT_BIT 0x2
#define MEM_CGROUP_RECLAIM_SOFT (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
+static DEFINE_LOCAL_IRQ_LOCK(stats_lock);
+
static void mem_cgroup_get(struct mem_cgroup *memcg);
static void mem_cgroup_put(struct mem_cgroup *memcg);
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
@@ -619,7 +623,7 @@ static unsigned long mem_cgroup_read_eve
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
bool file, int nr_pages)
{
- preempt_disable();
+ local_lock(stats_lock);
if (file)
__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
@@ -638,7 +642,7 @@ static void mem_cgroup_charge_statistics
__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
- preempt_enable();
+ local_unlock(stats_lock);
}
unsigned long
@@ -722,7 +726,7 @@ static void __mem_cgroup_target_update(s
*/
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
- preempt_disable();
+ local_lock(stats_lock);
/* threshold event is triggered in finer grain than soft limit */
if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) {
mem_cgroup_threshold(memcg);
@@ -742,7 +746,7 @@ static void memcg_check_events(struct me
}
#endif
}
- preempt_enable();
+ local_unlock(stats_lock);
}
static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
@@ -2608,10 +2612,10 @@ static int mem_cgroup_move_account(struc
if (PageCgroupFileMapped(pc)) {
/* Update mapped_file data for mem_cgroup */
- preempt_disable();
+ local_lock(stats_lock);
__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
- preempt_enable();
+ local_unlock(stats_lock);
}
mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
if (uncharge)
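
For reference, local_lock()/local_unlock() is the -rt locallock
primitive. On PREEMPT_RT_FULL it takes a per-CPU lock, so two tasks on
the same CPU are serialized while the section stays preemptible and
can sleep; taking the lock also pins the task to its CPU. On
!PREEMPT_RT_FULL it collapses to preempt_disable(). Roughly, from
memory of linux/locallock.h (details vary between -rt versions):

	#ifdef CONFIG_PREEMPT_RT_FULL
	/*
	 * Real per-CPU lock (rtmutex-based): serializes tasks on one
	 * CPU, leaves the section preemptible, pins the task to the CPU.
	 */
	# define local_lock(lvar)	__local_lock(&get_local_var(lvar))
	# define local_unlock(lvar)				\
		do {						\
			__local_unlock(this_cpu_ptr(&lvar));	\
			put_local_var(lvar);			\
		} while (0)
	#else
	/* !RT: falls back to the plain preempt_disable() protection */
	# define local_lock(lvar)	preempt_disable()
	# define local_unlock(lvar)	preempt_enable()
	#endif

That is why the patch is a no-op for mainline: the !RT build compiles
to exactly the preempt_disable()/preempt_enable() pairs it replaces,
while RT gets real per-CPU serialization without disabling preemption.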