[RFC PATCH 8/8] sched: drop for_each_numa_hop_mask()

From: Yury Norov
Date: Sat Mar 25 2023 - 14:56:15 EST


Now that we have for_each_numa_cpu(), for_each_numa_hop_mask() and
all the related code are dead code. Drop them.

Signed-off-by: Yury Norov <yury.norov@xxxxxxxxx>
---
This is an RFC because, even though the code below is currently unused,
there may be new users, particularly Intel, and I don't want to cut
anyone off abruptly. Those interested in keeping
for_each_numa_hop_mask(), please send NAKs.
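
For anyone converting existing users: the open-coded hop-mask walk
collapses into a single flat loop. A rough sketch, mirroring the
test_bitmap.c change below (do_something() is just a placeholder for
the per-CPU work):

	unsigned int cpu, hop;
	const struct cpumask *m, *p = cpu_none_mask;

	/* Before: walk hop masks, skipping CPUs already visited. */
	rcu_read_lock();
	for_each_numa_hop_mask(m, node) {
		for_each_cpu_andnot(cpu, m, p)
			do_something(cpu);	/* placeholder */
		p = m;
	}
	rcu_read_unlock();

	/* After: CPUs come out in NUMA-distance order directly. */
	rcu_read_lock();
	for_each_numa_cpu(cpu, hop, node, cpu_online_mask)
		do_something(cpu);
	rcu_read_unlock();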

 include/linux/topology.h | 25 -------------------------
 kernel/sched/topology.c  | 32 --------------------------------
 lib/test_bitmap.c        | 13 -------------
3 files changed, 70 deletions(-)

diff --git a/include/linux/topology.h b/include/linux/topology.h
index 3d8d486c817d..d2defd59b2d0 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -255,7 +255,6 @@ static inline const struct cpumask *cpu_cpu_mask(int cpu)
#ifdef CONFIG_NUMA
int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node);
int sched_numa_find_next_cpu(const struct cpumask *cpus, int cpu, int node, unsigned int *hop);
-extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops);
#else
static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
{
@@ -267,32 +266,8 @@ int sched_numa_find_next_cpu(const struct cpumask *cpus, int cpu, int node, unsi
{
return cpumask_next(cpu, cpus);
}
-
-static inline const struct cpumask *
-sched_numa_hop_mask(unsigned int node, unsigned int hops)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
#endif /* CONFIG_NUMA */

-/**
- * for_each_numa_hop_mask - iterate over cpumasks of increasing NUMA distance
- * from a given node.
- * @mask: the iteration variable.
- * @node: the NUMA node to start the search from.
- *
- * Requires rcu_lock to be held.
- *
- * Yields cpu_online_mask for @node == NUMA_NO_NODE.
- */
-#define for_each_numa_hop_mask(mask, node) \
- for (unsigned int __hops = 0; \
- mask = (node != NUMA_NO_NODE || __hops) ? \
- sched_numa_hop_mask(node, __hops) : \
- cpu_online_mask, \
- !IS_ERR_OR_NULL(mask); \
- __hops++)
-
/**
* for_each_numa_cpu - iterate over cpus in increasing order taking into account
* NUMA distances from a given node.
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 5f5f994a56da..2842a4d10624 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2171,38 +2171,6 @@ int sched_numa_find_next_cpu(const struct cpumask *cpus, int cpu, int node, unsi
}
EXPORT_SYMBOL_GPL(sched_numa_find_next_cpu);

-/**
- * sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from
- * @node
- * @node: The node to count hops from.
- * @hops: Include CPUs up to that many hops away. 0 means local node.
- *
- * Return: On success, a pointer to a cpumask of CPUs at most @hops away from
- * @node, an error value otherwise.
- *
- * Requires rcu_lock to be held. Returned cpumask is only valid within that
- * read-side section, copy it if required beyond that.
- *
- * Note that not all hops are equal in distance; see sched_init_numa() for how
- * distances and masks are handled.
- * Also note that this is a reflection of sched_domains_numa_masks, which may change
- * during the lifetime of the system (offline nodes are taken out of the masks).
- */
-const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops)
-{
- struct cpumask ***masks;
-
- if (node >= nr_node_ids || hops >= sched_domains_numa_levels)
- return ERR_PTR(-EINVAL);
-
- masks = rcu_dereference(sched_domains_numa_masks);
- if (!masks)
- return ERR_PTR(-EBUSY);
-
- return masks[hops][node];
-}
-EXPORT_SYMBOL_GPL(sched_numa_hop_mask);
-
#endif /* CONFIG_NUMA */

static int __sdt_alloc(const struct cpumask *cpu_map)
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 1b5f805f6879..6becb044a66f 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -756,19 +756,6 @@ static void __init test_for_each_numa(void)
{
unsigned int cpu, node;

- for (node = 0; node < sched_domains_numa_levels; node++) {
- const struct cpumask *m, *p = cpu_none_mask;
- unsigned int c = 0;
-
- rcu_read_lock();
- for_each_numa_hop_mask(m, node) {
- for_each_cpu_andnot(cpu, m, p)
- expect_eq_uint(cpumask_local_spread(c++, node), cpu);
- p = m;
- }
- rcu_read_unlock();
- }
-
for (node = 0; node < sched_domains_numa_levels; node++) {
unsigned int hop, c = 0;

--
2.34.1