[PATCH 3/6] mm, memcg: Prevent memory.low load/store tearing

From: Chris Down
Date: Thu Mar 12 2020 - 13:33:06 EST


memory.low can be set concurrently with reads, which may cause the wrong
value to be propagated: without READ_ONCE/WRITE_ONCE, the compiler is
free to tear or refetch the load/store.

Signed-off-by: Chris Down <chris@xxxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Roman Gushchin <guro@xxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: linux-mm@xxxxxxxxx
Cc: cgroups@xxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: kernel-team@xxxxxx
---
mm/memcontrol.c | 4 ++--
mm/page_counter.c | 9 ++++++---
2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index aca2964ea494..c85a304fa4a1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6262,7 +6262,7 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
return MEMCG_PROT_NONE;

emin = memcg->memory.min;
- elow = memcg->memory.low;
+ elow = READ_ONCE(memcg->memory.low);

parent = parent_mem_cgroup(memcg);
/* No parent means a non-hierarchical mode on v1 memcg */
@@ -6291,7 +6291,7 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
if (elow && parent_elow) {
unsigned long low_usage, siblings_low_usage;

- low_usage = min(usage, memcg->memory.low);
+ low_usage = min(usage, READ_ONCE(memcg->memory.low));
siblings_low_usage = atomic_long_read(
&parent->memory.children_low_usage);

diff --git a/mm/page_counter.c b/mm/page_counter.c
index 50184929b61f..18b7f779f2e2 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
@@ -17,6 +17,7 @@ static void propagate_protected_usage(struct page_counter *c,
unsigned long usage)
{
unsigned long protected, old_protected;
+ unsigned long low;
long delta;

if (!c->parent)
@@ -34,8 +35,10 @@ static void propagate_protected_usage(struct page_counter *c,
atomic_long_add(delta, &c->parent->children_min_usage);
}

- if (c->low || atomic_long_read(&c->low_usage)) {
- if (usage <= c->low)
+ low = READ_ONCE(c->low);
+
+ if (low || atomic_long_read(&c->low_usage)) {
+ if (usage <= low)
protected = usage;
else
protected = 0;
@@ -231,7 +234,7 @@ void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
{
struct page_counter *c;

- counter->low = nr_pages;
+ WRITE_ONCE(counter->low, nr_pages);

for (c = counter; c; c = c->parent)
propagate_protected_usage(c, atomic_long_read(&c->usage));
--
2.25.1