[RFC v1 07/19] mm, vmscan: use first_node in throttle_direct_reclaim()
From: Pengfei Li
Date: Thu Nov 21 2019 - 10:20:47 EST
In throttle_direct_reclaim(), throttling is based on the first usable node
rather than the first zone, so use first_node_nlist_nodemask() instead of
iterating zones with for_each_zone_nlist_nodemask(). The highest usable zone
index is clamped to ZONE_NORMAL, matching the old loop, which skipped any
zone above ZONE_NORMAL.
Signed-off-by: Pengfei Li <fly@xxxxxxxxxxx>
---
mm/vmscan.c | 41 +++++++++++++++++++----------------------
1 file changed, 19 insertions(+), 22 deletions(-)
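As a reviewer aid, below is a small userspace mock of the node-selection
semantics the new code relies on. first_node() is a stand-in for
first_node_nlist_nodemask(), whose behavior is assumed from earlier in this
series: return the first node in traversal order that has a usable zone at
or below high_idx, or NUMA_NO_NODE if none does. The node layout in main()
is made up purely for illustration.

/*
 * Reviewer aid: userspace mock of the node selection this patch
 * switches to. first_node() stands in for first_node_nlist_nodemask()
 * with assumed semantics; the zone masks below are illustrative only.
 */
#include <stdio.h>

#define NUMA_NO_NODE	(-1)

enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

struct mock_node {
	int id;
	unsigned int zone_mask;		/* bit z set => zone z is usable */
};

static int first_node(const struct mock_node *nodes, int nr,
		      enum zone_type high_idx)
{
	int i, z;

	for (i = 0; i < nr; i++)
		for (z = high_idx; z >= 0; z--)
			if (nodes[i].zone_mask & (1u << z))
				return nodes[i].id;
	return NUMA_NO_NODE;
}

int main(void)
{
	/* Node 0 is highmem-only, node 1 has ZONE_DMA and ZONE_NORMAL */
	struct mock_node nodes[] = {
		{ 0, 1u << ZONE_HIGHMEM },
		{ 1, (1u << ZONE_DMA) | (1u << ZONE_NORMAL) },
	};
	/* As in the patch: clamp to ZONE_NORMAL even for highmem requests */
	enum zone_type high_idx = ZONE_NORMAL;

	/* Skips node 0, throttles on node 1; prints "throttle node: 1" */
	printf("throttle node: %d\n", first_node(nodes, 2, high_idx));
	return 0;
}

The clamp to ZONE_NORMAL mirrors the old loop's zone_idx(zone) > ZONE_NORMAL
check: a highmem-only node can never satisfy the GFP_KERNEL allocations
needed for swapping over the network, so it is never chosen as the throttle
node.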
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 159a2aaa8db1..7554c8ba0841 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3166,9 +3166,9 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
static bool throttle_direct_reclaim(gfp_t gfp_mask, struct nodelist *nodelist,
nodemask_t *nodemask)
{
- struct nlist_traverser t;
- struct zone *zone;
- pg_data_t *pgdat = NULL;
+ pg_data_t *pgdat;
+ enum zone_type high_idx;
+ int node;
/*
* Kernel threads should not be throttled as they may be indirectly
@@ -3178,21 +3178,24 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct nodelist *nodelist,
* processes to block on log_wait_commit().
*/
if (current->flags & PF_KTHREAD)
- goto out;
+ return false;
/*
* If a fatal signal is pending, this process should not throttle.
* It should return quickly so it can exit and free its memory
*/
if (fatal_signal_pending(current))
- goto out;
+ return false;
/*
* Check if the pfmemalloc reserves are ok by finding the first node
* with a usable ZONE_NORMAL or lower zone. The expectation is that
* GFP_KERNEL will be required for allocating network buffers when
* swapping over the network so ZONE_HIGHMEM is unusable.
- *
+ */
+ high_idx = min_t(enum zone_type, ZONE_NORMAL, gfp_zone(gfp_mask));
+
+ /*
* Throttling is based on the first usable node and throttled processes
* wait on a queue until kswapd makes progress and wakes them. There
* is an affinity then between processes waking up and where reclaim
@@ -3201,21 +3204,16 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct nodelist *nodelist,
* for remote pfmemalloc reserves and processes on different nodes
* should make reasonable progress.
*/
- for_each_zone_nlist_nodemask(zone, &t, nodelist,
- gfp_zone(gfp_mask), nodemask) {
- if (zone_idx(zone) > ZONE_NORMAL)
- continue;
-
- /* Throttle based on the first usable node */
- pgdat = zone->zone_pgdat;
- if (allow_direct_reclaim(pgdat))
- goto out;
- break;
- }
+ node = first_node_nlist_nodemask(nodelist, high_idx, nodemask);
- /* If no zone was usable by the allocation flags then do not throttle */
+ /* If no node was usable by the allocation flags then do not throttle */
- if (!pgdat)
- goto out;
+ if (node == NUMA_NO_NODE)
+ return false;
+
+ pgdat = NODE_DATA(node);
+ /* Throttle based on the first usable node */
+ if (allow_direct_reclaim(pgdat))
+ return false;
/* Account for the throttling */
count_vm_event(PGSCAN_DIRECT_THROTTLE);
@@ -3236,14 +3234,13 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct nodelist *nodelist,
}
/* Throttle until kswapd wakes the process */
- wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
- allow_direct_reclaim(pgdat));
+ wait_event_killable(pgdat->pfmemalloc_wait,
+ allow_direct_reclaim(pgdat));
check_pending:
if (fatal_signal_pending(current))
return true;
-out:
return false;
}
--
2.23.0