[PATCH] mm/migrate: recover hotplug performance regression

From: Huang Ying
Date: Thu Sep 16 2021 - 04:51:44 EST


The 0-Day kernel test robot reported a -53.8% performance regression for
the stress-ng memhotplug test case.  This patch recovers the regression
by avoiding the demotion order update when it is not necessary, that is,
when neither the number of nodes with memory nor the number of nodes
with CPUs has changed since the last update.
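
For illustration only (not part of the patch below), a minimal
stand-alone sketch of that guard, where check_rebuild() stands in for
check_set_migration_target_nodes(), rebuild_demotion_order() for
__set_migration_target_nodes(), and the plain node counts for
num_node_state(N_MEMORY)/num_node_state(N_CPU):

#include <stdio.h>

static int cached_nr_mem_nodes = -1;	/* nodes with memory at last rebuild */
static int cached_nr_cpu_nodes = -1;	/* nodes with CPUs at last rebuild */

static void rebuild_demotion_order(void)
{
	/* placeholder for the expensive demotion order rebuild */
	printf("rebuilding demotion order\n");
}

/* Only rebuild when the node counts changed since the last rebuild. */
static void check_rebuild(int nr_mem_nodes, int nr_cpu_nodes)
{
	if (nr_mem_nodes == cached_nr_mem_nodes &&
	    nr_cpu_nodes == cached_nr_cpu_nodes)
		return;
	rebuild_demotion_order();
	cached_nr_mem_nodes = nr_mem_nodes;
	cached_nr_cpu_nodes = nr_cpu_nodes;
}

int main(void)
{
	check_rebuild(2, 1);	/* first call: rebuilds */
	check_rebuild(2, 1);	/* hotplug event, counts unchanged: skipped */
	check_rebuild(3, 1);	/* a node gained memory: rebuilds again */
	return 0;
}

In the stress-ng memhotplug case the repeated offline/online cycles
leave the node counts unchanged, so the rebuild is skipped.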

Refer: https://lore.kernel.org/lkml/20210905135932.GE15026@xsang-OptiPlex-9020/
Fixes: 884a6e5d1f93 ("mm/migrate: update node demotion order on hotplug events")
Signed-off-by: "Huang, Ying" <ying.huang@xxxxxxxxx>
Suggested-by: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Reported-by: kernel test robot <oliver.sang@xxxxxxxxx>
Cc: Yang Shi <shy828301@xxxxxxxxx>
Cc: Zi Yan <ziy@xxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Wei Xu <weixugc@xxxxxxxxxx>
Cc: Oscar Salvador <osalvador@xxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: Greg Thelen <gthelen@xxxxxxxxxx>
Cc: Keith Busch <kbusch@xxxxxxxxxx>
---
mm/migrate.c | 26 ++++++++++++++++++++++----
1 file changed, 22 insertions(+), 4 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 77d107a4577f..20d803707497 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1145,6 +1145,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
static int node_demotion[MAX_NUMNODES] __read_mostly =
{[0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE};

+static bool node_demotion_disabled __read_mostly;
+
/**
* next_demotion_node() - Get the next node in the demotion path
* @node: The starting node to lookup the next node
@@ -1158,6 +1160,8 @@ int next_demotion_node(int node)
{
int target;

+ if (node_demotion_disabled)
+ return NUMA_NO_NODE;
/*
* node_demotion[] is updated without excluding this
* function from running. RCU doesn't provide any
@@ -3198,13 +3202,26 @@ static void __set_migration_target_nodes(void)
goto again;
}

+static int nr_node_has_cpu;
+static int nr_node_has_mem;
+
+static void check_set_migration_target_nodes(void)
+{
+ if (num_node_state(N_MEMORY) != nr_node_has_mem ||
+ num_node_state(N_CPU) != nr_node_has_cpu) {
+ __set_migration_target_nodes();
+ nr_node_has_mem = num_node_state(N_MEMORY);
+ nr_node_has_cpu = num_node_state(N_CPU);
+ }
+}
+
/*
* For callers that do not hold get_online_mems() already.
*/
static void set_migration_target_nodes(void)
{
get_online_mems();
- __set_migration_target_nodes();
+ check_set_migration_target_nodes();
put_online_mems();
}

@@ -3249,7 +3266,7 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
* will leave migration disabled until the offline
* completes and the MEM_OFFLINE case below runs.
*/
- disable_all_migrate_targets();
+ node_demotion_disabled = true;
break;
case MEM_OFFLINE:
case MEM_ONLINE:
@@ -3257,14 +3274,15 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
* Recalculate the target nodes once the node
* reaches its final state (online or offline).
*/
- __set_migration_target_nodes();
+ check_set_migration_target_nodes();
+ node_demotion_disabled = false;
break;
case MEM_CANCEL_OFFLINE:
/*
* MEM_GOING_OFFLINE disabled all the migration
* targets. Reenable them.
*/
- __set_migration_target_nodes();
+ node_demotion_disabled = false;
break;
case MEM_GOING_ONLINE:
case MEM_CANCEL_ONLINE:
--
2.30.2