[PATCH] mm/hugetlb: hide nr_nodes inside for_each_node_mask_to_[alloc|free]

From: Wei Yang
Date: Tue Jul 14 2020 - 03:34:16 EST


The second parameter of for_each_node_mask_to_[alloc|free] is a loop
variable, which is never used outside of the loop iteration itself.

Let's hide it inside the macros.
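
As an example (this mirrors the alloc_pool_huge_page() hunk below),
callers change from:

	int nr_nodes, node;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		...
	}

to:

	int node;

	for_each_node_mask_to_alloc(h, node, nodes_allowed) {
		...
	}

Note the macros now declare __nr_nodes themselves, so each expansion
must live in its own block scope to avoid a redeclaration; every caller
converted here already satisfies that.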

Signed-off-by: Wei Yang <richard.weiyang@xxxxxxxxxxxxxxxxx>
---
 mm/hugetlb.c | 38 ++++++++++++++++++++------------------
 1 file changed, 20 insertions(+), 18 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 57ece74e3aae..9c3d15fb317e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1196,17 +1196,19 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 	return nid;
 }
 
-#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
-	for (nr_nodes = nodes_weight(*mask);				\
-		nr_nodes > 0 &&						\
+#define for_each_node_mask_to_alloc(hs, node, mask)			\
+	int __nr_nodes;							\
+	for (__nr_nodes = nodes_weight(*mask);				\
+		__nr_nodes > 0 &&					\
 		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
-		nr_nodes--)
+		__nr_nodes--)
 
-#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
-	for (nr_nodes = nodes_weight(*mask);				\
-		nr_nodes > 0 &&						\
+#define for_each_node_mask_to_free(hs, node, mask)			\
+	int __nr_nodes;							\
+	for (__nr_nodes = nodes_weight(*mask);				\
+		__nr_nodes > 0 &&					\
 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
-		nr_nodes--)
+		__nr_nodes--)
 
 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
 static void destroy_compound_gigantic_page(struct page *page,
@@ -1403,7 +1405,7 @@ static void __free_huge_page(struct page *page)
 	 * reservation. If the page was associated with a subpool, there
 	 * would have been a page reserved in the subpool before allocation
 	 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
-	 * reservtion, do not call hugepage_subpool_put_pages() as this will
+	 * reservation, do not call hugepage_subpool_put_pages() as this will
 	 * remove the reserved page from the subpool.
 	 */
 	if (!restore_reserve) {
@@ -1760,10 +1762,10 @@ static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 				nodemask_t *node_alloc_noretry)
 {
 	struct page *page;
-	int nr_nodes, node;
+	int node;
 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 
-	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+	for_each_node_mask_to_alloc(h, node, nodes_allowed) {
 		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
 						node_alloc_noretry);
 		if (page)
@@ -1787,10 +1789,10 @@ static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 				bool acct_surplus)
 {
-	int nr_nodes, node;
+	int node;
 	int ret = 0;
 
-	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
+	for_each_node_mask_to_free(h, node, nodes_allowed) {
 		/*
 		 * If we're returning unused surplus pages, only examine
 		 * nodes with surplus pages.
@@ -2481,9 +2483,9 @@ int alloc_bootmem_huge_page(struct hstate *h)
 int __alloc_bootmem_huge_page(struct hstate *h)
 {
 	struct huge_bootmem_page *m;
-	int nr_nodes, node;
+	int node;
 
-	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
+	for_each_node_mask_to_alloc(h, node, &node_states[N_MEMORY]) {
 		void *addr;
 
 		addr = memblock_alloc_try_nid_raw(
@@ -2662,17 +2664,17 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count,
 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
 				int delta)
 {
-	int nr_nodes, node;
+	int node;
 
 	VM_BUG_ON(delta != -1 && delta != 1);
 
 	if (delta < 0) {
-		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+		for_each_node_mask_to_alloc(h, node, nodes_allowed) {
 			if (h->surplus_huge_pages_node[node])
 				goto found;
 		}
 	} else {
-		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
+		for_each_node_mask_to_free(h, node, nodes_allowed) {
 			if (h->surplus_huge_pages_node[node] <
 					h->nr_huge_pages_node[node])
 				goto found;
--
2.20.1 (Apple Git-117)