[PATCH 5.7 163/179] mm/hugetlb: avoid hardcoding while checking if cma is enabled

From: Greg Kroah-Hartman
Date: Mon Jul 27 2020 - 10:25:42 EST


From: Barry Song <song.bao.hua@xxxxxxxxxxxxx>

commit dbda8feadfa46b3d8dd7a2304f84ccbc036effe9 upstream.

hugetlb_cma[0] can be NULL for various reasons; for example, node0
may have no memory. So a NULL hugetlb_cma[0] doesn't necessarily mean
CMA is not enabled: gigantic pages might have been reserved on other
nodes. This patch fixes a possible double reservation and a CMA leak.
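For context, a minimal userspace-only sketch (not part of the patch;
the main() scaffolding and example values are made up for illustration,
only hugetlb_cma, hugetlb_cma_size and MAX_NUMNODES come from the code
being patched) of why checking hugetlb_cma[0] is the wrong test when
node 0 is memoryless but CMA was reserved on another node:

	/* Illustrative sketch only, plain C, not kernel code. */
	#include <stdio.h>

	#define MAX_NUMNODES 4

	static void *hugetlb_cma[MAX_NUMNODES];	/* per-node CMA areas */
	static unsigned long hugetlb_cma_size;		/* from hugetlb_cma= */

	int main(void)
	{
		hugetlb_cma_size = 1UL << 30;	/* hypothetical 1G request */
		hugetlb_cma[1] = (void *)0x1000;	/* reserved on node 1 only;
							   node 0 has no memory */

		/* old check: wrongly concludes CMA is not in use */
		printf("old check says CMA %s\n",
		       hugetlb_cma[0] ? "enabled" : "disabled");

		/* new check: keyed off the requested size, as in this patch */
		printf("new check says CMA %s\n",
		       hugetlb_cma_size ? "enabled" : "disabled");
		return 0;
	}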

[akpm@xxxxxxxxxxxxxxxxxxxx: fix CONFIG_CMA=n warning]
[sfr@xxxxxxxxxxxxxxxx: better checks before using hugetlb_cma]
Link: http://lkml.kernel.org/r/20200721205716.6dbaa56b@xxxxxxxxxxxxxxxx

Fixes: cf11e85fc08c ("mm: hugetlb: optionally allocate gigantic hugepages using cma")
Signed-off-by: Barry Song <song.bao.hua@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Reviewed-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Acked-by: Roman Gushchin <guro@xxxxxx>
Cc: Jonathan Cameron <jonathan.cameron@xxxxxxxxxx>
Cc: <stable@xxxxxxxxxxxxxxx>
Link: http://lkml.kernel.org/r/20200710005726.36068-1-song.bao.hua@xxxxxxxxxxxxx
Signed-off-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
mm/hugetlb.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -46,7 +46,10 @@ int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

+#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
+#endif
+static unsigned long hugetlb_cma_size __initdata;

/*
* Minimum page order among possible hugepage sizes, set to a proper value
@@ -1236,9 +1239,10 @@ static void free_gigantic_page(struct pa
* If the page isn't allocated using the cma allocator,
* cma_release() returns false.
*/
- if (IS_ENABLED(CONFIG_CMA) &&
- cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
+#ifdef CONFIG_CMA
+ if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
return;
+#endif

free_contig_range(page_to_pfn(page), 1 << order);
}
@@ -1249,7 +1253,8 @@ static struct page *alloc_gigantic_page(
{
unsigned long nr_pages = 1UL << huge_page_order(h);

- if (IS_ENABLED(CONFIG_CMA)) {
+#ifdef CONFIG_CMA
+ {
struct page *page;
int node;

@@ -1263,6 +1268,7 @@ static struct page *alloc_gigantic_page(
return page;
}
}
+#endif

return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
}
@@ -2572,7 +2578,7 @@ static void __init hugetlb_hstate_alloc_

for (i = 0; i < h->max_huge_pages; ++i) {
if (hstate_is_gigantic(h)) {
- if (IS_ENABLED(CONFIG_CMA) && hugetlb_cma[0]) {
+ if (hugetlb_cma_size) {
pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
break;
}
@@ -5548,7 +5554,6 @@ void move_hugetlb_state(struct page *old
}

#ifdef CONFIG_CMA
-static unsigned long hugetlb_cma_size __initdata;
static bool cma_reserve_called __initdata;

static int __init cmdline_parse_hugetlb_cma(char *p)