alloc_buddy_hugetlb_folio() allocates an rmappable folio, then strips the
rmappable part and freezes its refcount.
We can simplify all that by allocating frozen pages directly: a frozen
page comes back with a refcount of zero and without the large_rmappable
flag set, so neither post-allocation step is needed anymore.
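
In essence (a simplified before/after sketch; the old path's retry-once
loop and NULL checks are elided):

	/* before: allocate rmappable, strip the flag, freeze the refcount */
	folio = __folio_alloc(gfp_mask, order, nid, nmask);
	folio_clear_large_rmappable(folio);
	folio_ref_freeze(folio, 1);

	/* after: the page is handed back already frozen (refcount == 0) */
	folio = (struct folio *)__alloc_frozen_pages(gfp_mask, order, nid, nmask);
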
Suggested-by: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Oscar Salvador <osalvador@xxxxxxx>
---
mm/hugetlb.c | 17 +----------------
1 file changed, 1 insertion(+), 16 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e3e6ac991b9c..83fa2b9f6fc4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1950,7 +1950,6 @@ static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
 	int order = huge_page_order(h);
 	struct folio *folio;
 	bool alloc_try_hard = true;
-	bool retry = true;
 
 	/*
 	 * By default we always try hard to allocate the folio with
@@ -1965,22 +1964,8 @@ static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
 		gfp_mask |= __GFP_RETRY_MAYFAIL;
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
-retry:
-	folio = __folio_alloc(gfp_mask, order, nid, nmask);
-	/* Ensure hugetlb folio won't have large_rmappable flag set. */
-	if (folio)
-		folio_clear_large_rmappable(folio);
-	if (folio && !folio_ref_freeze(folio, 1)) {
-		folio_put(folio);
-		if (retry) {	/* retry once */
-			retry = false;
-			goto retry;
-		}
-		/* WOW!  twice in a row. */
-		pr_warn("HugeTLB unexpected inflated folio ref count\n");
-		folio = NULL;
-	}
+	folio = (struct folio *)__alloc_frozen_pages(gfp_mask, order, nid, nmask);
 
 	/*
 	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a