[patch V3 20/40] x86/cpu/topology: Cure the abuse of cpuinfo for persisting logical ids
From: Thomas Gleixner
Date: Wed Aug 02 2023 - 06:23:18 EST
The per-CPU cpuinfo is used to persist the logical package and die IDs. That's
really not the right place simply because cpuinfo is reinitialized when a CPU
goes through an offline/online cycle.
This works by chance today, but that's far from correct and neither obvious
nor documented.
Add a per-CPU data structure which persists those logical IDs and allows the
CPUID evaluation code to be cleaned up.
This is a temporary workaround until the larger topology management rework is
in place, which will make all of this logical ID management machinery obsolete.
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
arch/x86/kernel/smpboot.c | 33 +++++++++++++++++++++++----------
1 file changed, 23 insertions(+), 10 deletions(-)
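
Note for reviewers: the lookup and update paths in the hunks below rely on
per_cpu() addressing an individual member of a per-CPU structure. The
following condensed sketch (illustration only, not part of the change; the
example_* names are made up) shows that idiom and why the physical IDs start
out as U32_MAX, so that a CPU which was never registered cannot match a valid
physical package ID such as 0:

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/percpu.h>

struct example_maps {
	u32	phys_pkg_id;
	u32	logical_pkg_id;
};

/* U32_MAX means "never registered", so physical package 0 cannot match */
static DEFINE_PER_CPU_READ_MOSTLY(struct example_maps, example_maps) = {
	.phys_pkg_id	= U32_MAX,
};

static int example_phys_to_logical_pkg(unsigned int phys_pkg)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* per_cpu(var.member, cpu) is equivalent to per_cpu_ptr(&var, cpu)->member */
		if (per_cpu(example_maps.phys_pkg_id, cpu) == phys_pkg)
			return per_cpu(example_maps.logical_pkg_id, cpu);
	}
	return -1;
}

static void example_register_pkg(unsigned int cpu, u32 pkg, u32 logical)
{
	/* Written once at registration; survives later offline/online cycles */
	per_cpu(example_maps.phys_pkg_id, cpu) = pkg;
	per_cpu(example_maps.logical_pkg_id, cpu) = logical;
}
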
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -124,7 +124,20 @@ struct mwait_cpu_dead {
  */
 static DEFINE_PER_CPU_ALIGNED(struct mwait_cpu_dead, mwait_cpu_dead);
 
-/* Logical package management. We might want to allocate that dynamically */
+/* Logical package management. */
+struct logical_maps {
+	u32	phys_pkg_id;
+	u32	phys_die_id;
+	u32	logical_pkg_id;
+	u32	logical_die_id;
+};
+
+/* Temporary workaround until the full topology mechanics is in place */
+static DEFINE_PER_CPU_READ_MOSTLY(struct logical_maps, logical_maps) = {
+	.phys_pkg_id	= U32_MAX,
+	.phys_die_id	= U32_MAX,
+};
+
 unsigned int __max_logical_packages __read_mostly;
 EXPORT_SYMBOL(__max_logical_packages);
 static unsigned int logical_packages __read_mostly;
@@ -345,10 +358,8 @@ int topology_phys_to_logical_pkg(unsigned int phys_pkg)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-		if (c->initialized && c->topo.pkg_id == phys_pkg)
-			return c->topo.logical_pkg_id;
+		if (per_cpu(logical_maps.phys_pkg_id, cpu) == phys_pkg)
+			return per_cpu(logical_maps.logical_pkg_id, cpu);
 	}
 	return -1;
 }
@@ -366,11 +377,9 @@ static int topology_phys_to_logical_die(unsigned int die_id, unsigned int cur_cpu)
 	int cpu, proc_id = cpu_data(cur_cpu).topo.pkg_id;
 
 	for_each_possible_cpu(cpu) {
-		struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-		if (c->initialized && c->topo.die_id == die_id &&
-		    c->topo.pkg_id == proc_id)
-			return c->topo.logical_die_id;
+		if (per_cpu(logical_maps.phys_pkg_id, cpu) == proc_id &&
+		    per_cpu(logical_maps.phys_die_id, cpu) == die_id)
+			return per_cpu(logical_maps.logical_die_id, cpu);
 	}
 	return -1;
 }
@@ -395,6 +404,8 @@ int topology_update_package_map(unsigned int pkg, unsigned int cpu)
 			cpu, pkg, new);
 	}
 found:
+	per_cpu(logical_maps.phys_pkg_id, cpu) = pkg;
+	per_cpu(logical_maps.logical_pkg_id, cpu) = new;
 	cpu_data(cpu).topo.logical_pkg_id = new;
 	return 0;
 }
@@ -418,6 +429,8 @@ int topology_update_die_map(unsigned int die, unsigned int cpu)
 			cpu, die, new);
 	}
 found:
+	per_cpu(logical_maps.phys_die_id, cpu) = die;
+	per_cpu(logical_maps.logical_die_id, cpu) = new;
 	cpu_data(cpu).topo.logical_die_id = new;
 	return 0;
 }
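
For context on how the unchanged external interface is consumed, a
hypothetical caller (names made up, not in the tree) which translates a
physical package ID into the stable logical ID for indexing per-package
storage could look like the sketch below; the lookup keeps resolving across
offline/online cycles because the per-CPU logical_maps entries are never
cleared:

#include <linux/errno.h>
#include <linux/topology.h>

/*
 * Hypothetical per-package table, allocated elsewhere with
 * __max_logical_packages entries.
 */
static unsigned long *pkg_event_count;

static int record_pkg_event(unsigned int cpu)
{
	int lp = topology_phys_to_logical_pkg(topology_physical_package_id(cpu));

	if (lp < 0)
		return -EINVAL;		/* package not (yet) registered */

	pkg_event_count[lp]++;
	return 0;
}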