[PATCH v1 1/7] mm/memcontrol: move page locking out of mem_cgroup_move_account

From: Konstantin Khlebnikov
Date: Wed Sep 04 2019 - 09:53:17 EST


Take the page lock in the callers instead of inside mem_cgroup_move_account().
This is required for calling mem_cgroup_move_account() for a page that is
already locked.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx>
---
mm/memcontrol.c | 64 +++++++++++++++++++++++++++----------------------------
1 file changed, 31 insertions(+), 33 deletions(-)
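
Note (not for the commit log): after this change mem_cgroup_move_account()
no longer takes the page lock itself; it asserts PageLocked() and fails only
with -EINVAL when page->mem_cgroup != from. Below is a rough sketch of the
convention the callers now follow, assuming the mm/memcontrol.c context; the
helper name and its exact shape are illustrative only and not part of this
patch. Device private pages additionally skip the LRU isolation step, as in
the hunks below.

	/* Illustrative sketch only, not part of this patch. */
	static int move_account_locked(struct page *page, bool compound,
				       struct mem_cgroup *from,
				       struct mem_cgroup *to)
	{
		int ret = -EBUSY;

		if (!trylock_page(page))	/* caller takes the page lock now */
			return ret;
		if (isolate_lru_page(page))	/* and keeps the page off the LRU */
			goto unlock;

		ret = mem_cgroup_move_account(page, compound, from, to);

		putback_lru_page(page);
	unlock:
		unlock_page(page);
		return ret;
	}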

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9ec5e12486a7..40ddc233e973 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5135,7 +5135,8 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
  * @from: mem_cgroup which the page is moved from.
  * @to: mem_cgroup which the page is moved to. @from != @to.
  *
- * The caller must make sure the page is not on LRU (isolate_page() is useful.)
+ * The caller must lock the page and make sure it is not on LRU
+ * (isolate_page() is useful.)
  *
  * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
  * from old cgroup.
@@ -5147,24 +5148,15 @@ static int mem_cgroup_move_account(struct page *page,
 {
 	unsigned long flags;
 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
-	int ret;
 	bool anon;
 
 	VM_BUG_ON(from == to);
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 	VM_BUG_ON(compound && !PageTransHuge(page));
 
-	/*
-	 * Prevent mem_cgroup_migrate() from looking at
-	 * page->mem_cgroup of its source page while we change it.
-	 */
-	ret = -EBUSY;
-	if (!trylock_page(page))
-		goto out;
-
-	ret = -EINVAL;
 	if (page->mem_cgroup != from)
-		goto out_unlock;
+		return -EINVAL;
 
 	anon = PageAnon(page);
 
@@ -5204,18 +5196,14 @@ static int mem_cgroup_move_account(struct page *page,
 	page->mem_cgroup = to;
 	spin_unlock_irqrestore(&from->move_lock, flags);
 
-	ret = 0;
-
 	local_irq_disable();
 	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
 	memcg_check_events(to, page);
 	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
 	memcg_check_events(from, page);
 	local_irq_enable();
-out_unlock:
-	unlock_page(page);
-out:
-	return ret;
+
+	return 0;
 }
 
 /**
@@ -5535,36 +5523,42 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 	struct vm_area_struct *vma = walk->vma;
 	pte_t *pte;
 	spinlock_t *ptl;
-	enum mc_target_type target_type;
 	union mc_target target;
 	struct page *page;
 
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
+		bool device = false;
+
 		if (mc.precharge < HPAGE_PMD_NR) {
 			spin_unlock(ptl);
 			return 0;
 		}
-		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
-		if (target_type == MC_TARGET_PAGE) {
-			page = target.page;
-			if (!isolate_lru_page(page)) {
-				if (!mem_cgroup_move_account(page, true,
-							     mc.from, mc.to)) {
-					mc.precharge -= HPAGE_PMD_NR;
-					mc.moved_charge += HPAGE_PMD_NR;
-				}
-				putback_lru_page(page);
-			}
-			put_page(page);
-		} else if (target_type == MC_TARGET_DEVICE) {
+
+		switch (get_mctgt_type_thp(vma, addr, *pmd, &target)) {
+		case MC_TARGET_DEVICE:
+			device = true;
+			/* fall through */
+		case MC_TARGET_PAGE:
 			page = target.page;
+			if (!trylock_page(page))
+				goto put_huge;
+			if (!device && isolate_lru_page(page))
+				goto unlock_huge;
 			if (!mem_cgroup_move_account(page, true,
 						     mc.from, mc.to)) {
 				mc.precharge -= HPAGE_PMD_NR;
 				mc.moved_charge += HPAGE_PMD_NR;
 			}
+			if (!device)
+				putback_lru_page(page);
+unlock_huge:
+			unlock_page(page);
+put_huge:
 			put_page(page);
+			break;
+		default:
+			break;
 		}
 		spin_unlock(ptl);
 		return 0;
@@ -5596,8 +5590,10 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 			 */
 			if (PageTransCompound(page))
 				goto put;
-			if (!device && isolate_lru_page(page))
+			if (!trylock_page(page))
 				goto put;
+			if (!device && isolate_lru_page(page))
+				goto unlock;
 			if (!mem_cgroup_move_account(page, false,
 						mc.from, mc.to)) {
 				mc.precharge--;
@@ -5606,6 +5602,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 			}
 			if (!device)
 				putback_lru_page(page);
+unlock:
+			unlock_page(page);
 put:			/* get_mctgt_type() gets the page */
 			put_page(page);
 			break;
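
For review convenience, roughly how the non-THP path of
mem_cgroup_move_charge_pte_range() reads with the last two hunks applied
(reconstructed from the diff above; code between the hunks is elided):

	case MC_TARGET_PAGE:
		page = target.page;
		/* ... partial-THP check elided ... */
		if (!trylock_page(page))
			goto put;
		if (!device && isolate_lru_page(page))
			goto unlock;
		if (!mem_cgroup_move_account(page, false, mc.from, mc.to)) {
			mc.precharge--;
			/* ... moved_charge accounting elided ... */
		}
		if (!device)
			putback_lru_page(page);
	unlock:
		unlock_page(page);
	put:	/* get_mctgt_type() gets the page */
		put_page(page);
		break;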