[PATCH] mm: memcontrol: decouple reference counting from page accounting

From: Johannes Weiner
Date: Thu Oct 31 2019 - 07:04:23 EST


The reference counting of a memcg is currently coupled directly to how
many 4k pages are charged to it. This doesn't work well with Roman's
new slab controller, which maintains pools of objects and doesn't want
to keep an extra balance sheet for the pages backing those objects.

This unusual refcounting design (reference counts usually track
pointers to an object) is only for historical reasons: memcg used to
not take any css references and simply stalled offlining until all
charges had been reparented and the page counters had dropped to
zero. When we got rid of the reparenting requirement, the simple
mechanical translation was to take a reference for every charge.

More historical context can be found in e8ea14cc6ead ("mm: memcontrol:
take a css reference for each charged page"), 64f219938941 ("mm:
memcontrol: remove obsolete kmemcg pinning tricks") and b2052564e66d
("mm: memcontrol: continue cache reclaim from offlined groups").

The new slab controller exposes the limitations in this scheme, so
let's switch it to a more idiomatic reference counting model based on
actual kernel pointers to the memcg (see the sketch after this list):

- The per-cpu stock holds a reference to the memcg it's caching

- User pages hold a reference for their page->mem_cgroup. Transparent
huge pages will no longer acquire tail references in advance; we'll
get them if needed during the split.

- Kernel pages hold a reference for their page->mem_cgroup

- mem_cgroup_try_charge(), if successful, will return one reference to
be consumed by page->mem_cgroup during commit, or put during cancel

- Pages allocated in the root cgroup will acquire and release css
references for simplicity. css_get() and css_put() optimize that.

- The current memcg_charge_slab() already hacked around the per-charge
references; this change gets rid of that as well.
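
For illustration, here is a toy userspace model of the scheme above.
struct memcg_model, model_commit() and model_uncharge() are invented
for this sketch, and css_get()/css_put() below are simplified one-line
stand-ins for the real helpers; the point is only that the reference
count follows pointers to the memcg, so e.g. a 512-page THP pins one
reference rather than 512.

/*
 * Toy userspace model (not kernel code): the css reference count
 * tracks pointers to the memcg (per-cpu stock, page->mem_cgroup),
 * not the number of charged 4k pages.
 */
#include <assert.h>
#include <stdio.h>

struct memcg_model {
	int  css_refs;    /* stands in for the css reference count */
	long nr_charged;  /* stands in for the page_counter charge */
};

static void css_get(struct memcg_model *m) { m->css_refs++; }
static void css_put(struct memcg_model *m) { m->css_refs--; }

/* Commit: the page's mem_cgroup pointer pins exactly one reference. */
static void model_commit(struct memcg_model *m, struct memcg_model **slot,
			 long nr_pages)
{
	m->nr_charged += nr_pages;	/* accounting only, no extra refs */
	css_get(m);			/* one ref for the pointer */
	*slot = m;
}

/* Uncharge: drop the accounting and the pointer's single reference. */
static void model_uncharge(struct memcg_model **slot, long nr_pages)
{
	struct memcg_model *m = *slot;

	m->nr_charged -= nr_pages;
	*slot = NULL;
	css_put(m);
}

int main(void)
{
	struct memcg_model memcg = { .css_refs = 1, .nr_charged = 0 };
	struct memcg_model *huge = NULL;

	/* A 512-page THP pins one reference, not 512. */
	model_commit(&memcg, &huge, 512);
	assert(memcg.css_refs == 2 && memcg.nr_charged == 512);

	model_uncharge(&huge, 512);
	assert(memcg.css_refs == 1 && memcg.nr_charged == 0);

	printf("refs=%d charged=%ld\n", memcg.css_refs, memcg.nr_charged);
	return 0;
}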

Signed-off-by: Johannes Weiner <hannes@xxxxxxxxxxx>
---
 mm/memcontrol.c | 45 ++++++++++++++++++++++++++-------------------
 mm/slab.h       |  2 --
 2 files changed, 26 insertions(+), 21 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c7e3e758c165..b88c273d6dc0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2190,13 +2190,17 @@ static void drain_stock(struct memcg_stock_pcp *stock)
 {
 	struct mem_cgroup *old = stock->cached;
 
+	if (!old)
+		return;
+
 	if (stock->nr_pages) {
 		page_counter_uncharge(&old->memory, stock->nr_pages);
 		if (do_memsw_account())
 			page_counter_uncharge(&old->memsw, stock->nr_pages);
-		css_put_many(&old->css, stock->nr_pages);
 		stock->nr_pages = 0;
 	}
+
+	css_put(&old->css);
 	stock->cached = NULL;
 }

@@ -2232,6 +2236,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached != memcg) { /* reset if necessary */
 		drain_stock(stock);
+		css_get(&memcg->css);
 		stock->cached = memcg;
 	}
 	stock->nr_pages += nr_pages;
@@ -2635,12 +2640,10 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	page_counter_charge(&memcg->memory, nr_pages);
 	if (do_memsw_account())
 		page_counter_charge(&memcg->memsw, nr_pages);
-	css_get_many(&memcg->css, nr_pages);
 
 	return 0;
 
 done_restock:
-	css_get_many(&memcg->css, batch);
 	if (batch > nr_pages)
 		refill_stock(memcg, batch - nr_pages);

@@ -2677,8 +2680,6 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 	page_counter_uncharge(&memcg->memory, nr_pages);
 	if (do_memsw_account())
 		page_counter_uncharge(&memcg->memsw, nr_pages);
-
-	css_put_many(&memcg->css, nr_pages);
 }
 
 static void lock_page_lru(struct page *page, int *isolated)
@@ -2989,6 +2990,7 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
 		if (!ret) {
 			page->mem_cgroup = memcg;
 			__SetPageKmemcg(page);
+			return 0;
 		}
 	}
 	css_put(&memcg->css);
@@ -3026,12 +3028,11 @@ void __memcg_kmem_uncharge(struct page *page, int order)
 	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
 	__memcg_kmem_uncharge_memcg(memcg, nr_pages);
 	page->mem_cgroup = NULL;
+	css_put(&memcg->css);
 
 	/* slab pages do not have PageKmemcg flag set */
 	if (PageKmemcg(page))
 		__ClearPageKmemcg(page);
-
-	css_put_many(&memcg->css, nr_pages);
 }
 #endif /* CONFIG_MEMCG_KMEM */

@@ -3043,15 +3044,18 @@ void __memcg_kmem_uncharge(struct page *page, int order)
  */
 void mem_cgroup_split_huge_fixup(struct page *head)
 {
+	struct mem_cgroup *memcg = head->mem_cgroup;
 	int i;
 
 	if (mem_cgroup_disabled())
 		return;
 
-	for (i = 1; i < HPAGE_PMD_NR; i++)
-		head[i].mem_cgroup = head->mem_cgroup;
+	for (i = 1; i < HPAGE_PMD_NR; i++) {
+		css_get(&memcg->css);
+		head[i].mem_cgroup = memcg;
+	}
 
-	__mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
+	__mod_memcg_state(memcg, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

@@ -5485,7 +5489,9 @@ static int mem_cgroup_move_account(struct page *page,
 	 * uncharging, charging, migration, or LRU putback.
 	 */
 
-	/* caller should have done css_get */
+	css_get(&to->css);
+	css_put(&from->css);
+
 	page->mem_cgroup = to;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -6514,8 +6520,10 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 		memcg = get_mem_cgroup_from_mm(mm);
 
 	ret = try_charge(memcg, gfp_mask, nr_pages);
-
-	css_put(&memcg->css);
+	if (ret) {
+		css_put(&memcg->css);
+		memcg = NULL;
+	}
 out:
 	*memcgp = memcg;
 	return ret;
@@ -6611,6 +6619,8 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
 		return;
 
 	cancel_charge(memcg, nr_pages);
+
+	css_put(&memcg->css);
 }
 
 struct uncharge_gather {
@@ -6652,9 +6662,6 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages);
 	memcg_check_events(ug->memcg, ug->dummy_page);
 	local_irq_restore(flags);
-
-	if (!mem_cgroup_is_root(ug->memcg))
-		css_put_many(&ug->memcg->css, nr_pages);
 }
 
 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
@@ -6702,6 +6709,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)

 	ug->dummy_page = page;
 	page->mem_cgroup = NULL;
+	css_put(&ug->memcg->css);
 }
 
 static void uncharge_list(struct list_head *page_list)
@@ -6810,8 +6818,8 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 	page_counter_charge(&memcg->memory, nr_pages);
 	if (do_memsw_account())
 		page_counter_charge(&memcg->memsw, nr_pages);
-	css_get_many(&memcg->css, nr_pages);
 
+	css_get(&memcg->css);
 	commit_charge(newpage, memcg, false);
 
 	local_irq_save(flags);
@@ -7059,8 +7067,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 				     -nr_entries);
 	memcg_check_events(memcg, page);
 
-	if (!mem_cgroup_is_root(memcg))
-		css_put_many(&memcg->css, nr_entries);
+	css_put(&memcg->css);
 }
 
 /**
diff --git a/mm/slab.h b/mm/slab.h
index 2bbecf28688d..9f84298db2d7 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -372,9 +372,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
 	lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
 	mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);
 
-	/* transer try_charge() page references to kmem_cache */
 	percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
-	css_put_many(&memcg->css, 1 << order);
 out:
 	css_put(&memcg->css);
 	return ret;
--
2.23.0