Re: [PATCH v3 4/5] mm/khugepaged: Convert alloc_charge_hpage() to use folios

From: Yang Shi
Date: Tue Oct 24 2023 - 13:40:05 EST


On Fri, Oct 20, 2023 at 11:34 AM Vishal Moola (Oracle)
<vishal.moola@xxxxxxxxx> wrote:
>
> Also remove count_memcg_page_event now that its last caller no longer uses
> it, and rename hpage_collapse_alloc_page() to hpage_collapse_alloc_folio().
>
> This removes 1 call to compound_head() and helps convert khugepaged to
> use folios throughout.
>
> Signed-off-by: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>

Reviewed-by: Yang Shi <shy828301@xxxxxxxxx>
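
For readers following the folio conversion: the compound_head() call that
goes away is the one hidden behind page_memcg() in count_memcg_page_event()
(page_memcg() resolves the head page via page_folio()). A minimal sketch of
the before/after call, not part of this patch:

	/* before: page_memcg() must first look up the head page */
	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);

	/* after: the folio is already the head, folio_memcg() reads it directly */
	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);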

> ---
>  include/linux/memcontrol.h | 14 --------------
>  mm/khugepaged.c            | 17 ++++++++++-------
>  2 files changed, 10 insertions(+), 21 deletions(-)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index ab94ad4597d0..3126bde982e8 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -1080,15 +1080,6 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
>  	local_irq_restore(flags);
>  }
>
> -static inline void count_memcg_page_event(struct page *page,
> -					  enum vm_event_item idx)
> -{
> -	struct mem_cgroup *memcg = page_memcg(page);
> -
> -	if (memcg)
> -		count_memcg_events(memcg, idx, 1);
> -}
> -
>  static inline void count_memcg_folio_events(struct folio *folio,
>  		enum vm_event_item idx, unsigned long nr)
>  {
> @@ -1565,11 +1556,6 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
>  {
>  }
>
> -static inline void count_memcg_page_event(struct page *page,
> -					  int idx)
> -{
> -}
> -
>  static inline void count_memcg_folio_events(struct folio *folio,
>  		enum vm_event_item idx, unsigned long nr)
>  {
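
For reference, the surviving folio helper (quoting the base tree from
memory, so double-check the exact body) is a drop-in replacement:

	static inline void count_memcg_folio_events(struct folio *folio,
			enum vm_event_item idx, unsigned long nr)
	{
		struct mem_cgroup *memcg = folio_memcg(folio);

		if (memcg)
			count_memcg_events(memcg, idx, nr);
	}

so any remaining count_memcg_page_event(page, idx) user would become
count_memcg_folio_events(page_folio(page), idx, 1).
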
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 9efd8ff68f06..6a7184cd291b 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -888,16 +888,16 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
>  }
>  #endif
>
> -static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
> +static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
>  				      nodemask_t *nmask)
>  {
> -	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
> -	if (unlikely(!*hpage)) {
> +	*folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
> +
> +	if (unlikely(!*folio)) {
>  		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
>  		return false;
>  	}
>
> -	folio_prep_large_rmappable((struct folio *)*hpage);
>  	count_vm_event(THP_COLLAPSE_ALLOC);
>  	return true;
>  }
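
Dropping the explicit folio_prep_large_rmappable() call looks correct:
__folio_alloc() already ORs in __GFP_COMP and does the large-rmappable prep
itself for order > 1. Roughly (paraphrasing mm/page_alloc.c in the base
tree, not part of this patch):

	struct folio *__folio_alloc(gfp_t gfp, unsigned int order,
			int preferred_nid, nodemask_t *nodemask)
	{
		struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
				preferred_nid, nodemask);
		struct folio *folio = (struct folio *)page;

		if (folio && order > 1)
			folio_prep_large_rmappable(folio);
		return folio;
	}
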
> @@ -1064,17 +1064,20 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
>  	int node = hpage_collapse_find_target_node(cc);
>  	struct folio *folio;
>
> -	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
> +	if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask)) {
> +		*hpage = NULL;
>  		return SCAN_ALLOC_HUGE_PAGE_FAIL;
> +	}
>
> -	folio = page_folio(*hpage);
>  	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
>  		folio_put(folio);
>  		*hpage = NULL;
>  		return SCAN_CGROUP_CHARGE_FAIL;
>  	}
> -	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
>
> +	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
> +
> +	*hpage = folio_page(folio, 0);
>  	return SCAN_SUCCEED;
>  }
>
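
The *hpage = folio_page(folio, 0) at the end is just the bridge that keeps
existing callers working until they themselves are converted; a caller still
receives the head page and can round-trip back to the folio. Illustrative
only, not part of this patch:

	struct page *hpage;
	int result = alloc_charge_hpage(&hpage, mm, cc);

	if (result != SCAN_SUCCEED)
		return result;
	/* page_folio(hpage) recovers the same folio that was just charged */
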
> --
> 2.40.1
>