[tip: x86/cpu] x86/cacheinfo: Separate amd_northbridge from _cpuid4_info_regs

From: tip-bot2 for Ahmed S. Darwish
Date: Tue Mar 25 2025 - 05:41:12 EST


The following commit has been merged into the x86/cpu branch of tip:

Commit-ID: c58ed2d4da8dced3fa4505f498bd393f565b471a
Gitweb: https://git.kernel.org/tip/c58ed2d4da8dced3fa4505f498bd393f565b471a
Author: Ahmed S. Darwish <darwi@xxxxxxxxxxxxx>
AuthorDate: Mon, 24 Mar 2025 14:33:07 +01:00
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitterDate: Tue, 25 Mar 2025 10:22:36 +01:00

x86/cacheinfo: Separate amd_northbridge from _cpuid4_info_regs

'struct _cpuid4_info_regs' is meant to hold the CPUID leaf 0x4
output registers (EAX, EBX, and ECX), as well as derived information
such as the cache node ID and size.
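
For reference, the pre-patch layout is roughly the sketch below. It is
reconstructed from the hunk further down plus the surrounding cacheinfo.c
code, so treat it as illustrative rather than authoritative:

    struct _cpuid4_info_regs {
            union _cpuid4_leaf_eax  eax;    /* CPUID(4) EAX output */
            union _cpuid4_leaf_ebx  ebx;    /* CPUID(4) EBX output */
            union _cpuid4_leaf_ecx  ecx;    /* CPUID(4) ECX output */
            unsigned int            id;     /* derived cache node ID */
            unsigned long           size;   /* derived cache size */
            struct amd_northbridge  *nb;    /* removed by this patch */
    };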

It also contains a reference to amd_northbridge, which is there only to
be "parked" until ci_info_init() can store it in the priv pointer of the
<linux/cacheinfo.h> API. That priv pointer is then used by AMD-specific
L3 cache_disable_0/1 sysfs attributes.
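
A rough sketch of that consumer side, paraphrased from the existing
cache_disable_0/1 handlers (names and formatting here are illustrative,
not the verbatim kernel code):

    static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf,
                                      unsigned int slot)
    {
            /* The northbridge "parked" in ci->priv by ci_info_init() */
            struct amd_northbridge *nb = this_leaf->priv;
            int index = amd_get_l3_disable_slot(nb, slot);

            if (index >= 0)
                    return sprintf(buf, "%d\n", index);

            return sprintf(buf, "FREE\n");
    }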

Decouple amd_northbridge from _cpuid4_info_regs and pass it explicitly
through the x86/cacheinfo functions instead. Doing so clarifies when
amd_northbridge is actually needed (AMD-only code paths) and when it is
not (Intel-specific code paths). It also prepares for moving the
AMD-specific L3 cache_disable_0/1 sysfs code into its own file in the
next commit.
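
Condensed from the diff below, the resulting calling convention is:

    /* Intel-only path (init_intel_cacheinfo()): no northbridge involved */
    retval = cpuid4_cache_lookup_regs(i, &id4, NULL);

    /*
     * Generic path (populate_cache_leaves()): the northbridge is returned
     * separately and handed to ci_info_init(), which parks it in ci->priv.
     */
    ret = cpuid4_cache_lookup_regs(idx, &id4, &nb);
    ci_info_init(ci++, &id4, nb);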

Signed-off-by: Ahmed S. Darwish <darwi@xxxxxxxxxxxxx>
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/20250324133324.23458-13-darwi@xxxxxxxxxxxxx
---
arch/x86/kernel/cpu/cacheinfo.c | 45 ++++++++++++++++++++------------
1 file changed, 29 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index f1055e8..8c2b51b 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -168,7 +168,6 @@ struct _cpuid4_info_regs {
union _cpuid4_leaf_ecx ecx;
unsigned int id;
unsigned long size;
- struct amd_northbridge *nb;
};

/* AMD doesn't have CPUID4. Emulate it here to report the same
@@ -573,25 +572,36 @@ cache_get_priv_group(struct cacheinfo *ci)
return &cache_private_group;
}

-static void amd_init_l3_cache(struct _cpuid4_info_regs *id4, int index)
+static struct amd_northbridge *amd_init_l3_cache(int index)
{
+ struct amd_northbridge *nb;
int node;

/* only for L3, and not in virtualized environments */
if (index < 3)
- return;
+ return NULL;

node = topology_amd_node_id(smp_processor_id());
- id4->nb = node_to_amd_nb(node);
- if (id4->nb && !id4->nb->l3_cache.indices)
- amd_calc_l3_indices(id4->nb);
+ nb = node_to_amd_nb(node);
+ if (nb && !nb->l3_cache.indices)
+ amd_calc_l3_indices(nb);
+
+ return nb;
}
#else
-#define amd_init_l3_cache(x, y)
+static struct amd_northbridge *amd_init_l3_cache(int index)
+{
+ return NULL;
+}
#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */

-static int
-cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *id4)
+/*
+ * Fill passed _cpuid4_info_regs structure.
+ * Intel-only code paths should pass NULL for the amd_northbridge
+ * return pointer.
+ */
+static int cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *id4,
+ struct amd_northbridge **nb)
{
u8 cpu_vendor = boot_cpu_data.x86_vendor;
union _cpuid4_leaf_eax eax;
@@ -607,7 +617,9 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *id4)
/* Legacy AMD fallback */
amd_cpuid4(index, &eax, &ebx, &ecx);
}
- amd_init_l3_cache(id4, index);
+
+ if (nb)
+ *nb = amd_init_l3_cache(index);
} else {
/* Intel */
cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
@@ -758,7 +770,7 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
struct _cpuid4_info_regs id4 = {};
int retval;

- retval = cpuid4_cache_lookup_regs(i, &id4);
+ retval = cpuid4_cache_lookup_regs(i, &id4, NULL);
if (retval < 0)
continue;

@@ -934,8 +946,8 @@ static void __cache_cpumap_setup(unsigned int cpu, int index,
}
}

-static void ci_info_init(struct cacheinfo *ci,
- const struct _cpuid4_info_regs *id4)
+static void ci_info_init(struct cacheinfo *ci, const struct _cpuid4_info_regs *id4,
+ struct amd_northbridge *nb)
{
ci->id = id4->id;
ci->attributes = CACHE_ID;
@@ -946,7 +958,7 @@ static void ci_info_init(struct cacheinfo *ci,
ci->size = id4->size;
ci->number_of_sets = id4->ecx.split.number_of_sets + 1;
ci->physical_line_partition = id4->ebx.split.physical_line_partition + 1;
- ci->priv = id4->nb;
+ ci->priv = nb;
}

int init_cache_level(unsigned int cpu)
@@ -982,13 +994,14 @@ int populate_cache_leaves(unsigned int cpu)
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *ci = this_cpu_ci->info_list;
struct _cpuid4_info_regs id4 = {};
+ struct amd_northbridge *nb;

for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
- ret = cpuid4_cache_lookup_regs(idx, &id4);
+ ret = cpuid4_cache_lookup_regs(idx, &id4, &nb);
if (ret)
return ret;
get_cache_id(cpu, &id4);
- ci_info_init(ci++, &id4);
+ ci_info_init(ci++, &id4, nb);
__cache_cpumap_setup(cpu, idx, &id4);
}
this_cpu_ci->cpu_map_populated = true;