[PATCH v2 4/5] mm/khugepaged: Convert alloc_charge_hpage() to use folios
From: Vishal Moola (Oracle)
Date: Wed Oct 18 2023 - 16:33:24 EST
Also remove count_memcg_page_event() now that its last caller no longer
uses it, and convert hpage_collapse_alloc_page() to
hpage_collapse_alloc_folio(). This removes one call to compound_head()
and helps convert khugepaged to use folios throughout.
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>
---
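(A note for reviewers, not part of the commit message: below is a
condensed, hand-assembled view of the allocation helper as it reads
after this patch, stitched together from the hunks that follow. It is a
reading aid only and will not compile on its own; the comments are
annotations added here, not code from the patch.)

	static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
					      nodemask_t *nmask)
	{
		/*
		 * Allocate the PMD-sized folio directly. At the time of this
		 * series, __folio_alloc() adds __GFP_COMP and, for order > 1,
		 * calls folio_prep_large_rmappable() itself, which is why the
		 * explicit prep call (and its page-to-folio cast) disappears
		 * from this function.
		 */
		*folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);

		if (unlikely(!*folio)) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			return false;
		}

		count_vm_event(THP_COLLAPSE_ALLOC);
		return true;
	}

The compound_head() call that goes away is the one buried in
page_folio(*hpage) in alloc_charge_hpage(): that function now keeps the
folio it allocated, charges and counts it as a folio, and only converts
back via folio_page(folio, 0) at the end, because its callers still
expect a struct page.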
 include/linux/memcontrol.h | 14 --------------
 mm/khugepaged.c            | 15 ++++++++-------
 2 files changed, 8 insertions(+), 21 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ab94ad4597d0..3126bde982e8 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1080,15 +1080,6 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
 	local_irq_restore(flags);
 }
 
-static inline void count_memcg_page_event(struct page *page,
-					  enum vm_event_item idx)
-{
-	struct mem_cgroup *memcg = page_memcg(page);
-
-	if (memcg)
-		count_memcg_events(memcg, idx, 1);
-}
-
 static inline void count_memcg_folio_events(struct folio *folio,
 		enum vm_event_item idx, unsigned long nr)
 {
@@ -1565,11 +1556,6 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
 {
 }
 
-static inline void count_memcg_page_event(struct page *page,
-					  int idx)
-{
-}
-
 static inline void count_memcg_folio_events(struct folio *folio,
 		enum vm_event_item idx, unsigned long nr)
 {
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9efd8ff68f06..d49aa22d99c9 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -888,16 +888,16 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
 }
 #endif
 
-static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
+static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
 				      nodemask_t *nmask)
 {
-	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
-	if (unlikely(!*hpage)) {
+	*folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
+
+	if (unlikely(!*folio)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		return false;
 	}
 
-	folio_prep_large_rmappable((struct folio *)*hpage);
 	count_vm_event(THP_COLLAPSE_ALLOC);
 	return true;
 }
@@ -1064,17 +1064,18 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
 	int node = hpage_collapse_find_target_node(cc);
 	struct folio *folio;
 
-	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
+	if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask))
 		return SCAN_ALLOC_HUGE_PAGE_FAIL;
 
-	folio = page_folio(*hpage);
 	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
 		folio_put(folio);
 		*hpage = NULL;
 		return SCAN_CGROUP_CHARGE_FAIL;
 	}
-	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
+	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
+
+	*hpage = folio_page(folio, 0);
 
 	return SCAN_SUCCEED;
 }
--
2.40.1