[PATCH v3 07/17] x86/mce: Define BSP-only init

From: Yazen Ghannam
Date: Tue Apr 15 2025 - 10:56:17 EST


Currently, MCA initialization is executed identically on each CPU as it
is brought online. However, a number of MCA initialization tasks only
need to be done once.

Define a function to collect all 'global' init tasks and call it from
the BSP only. Start with CPU features.
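
For reference, the intended init flow after this patch looks roughly
like this (identify_cpu() being the existing per-CPU identification
path in common.c):

    early_identify_cpu()	/* boot CPU only */
      cpu_mca_init()		/* new: one-time, global MCA setup */

    identify_cpu()		/* every CPU as it comes online */
      mcheck_cpu_init()		/* per-CPU MCA setup, unchanged */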

Reviewed-by: Qiuxu Zhuo <qiuxu.zhuo@xxxxxxxxx>
Tested-by: Tony Luck <tony.luck@xxxxxxxxx>
Reviewed-by: Tony Luck <tony.luck@xxxxxxxxx>
Signed-off-by: Yazen Ghannam <yazen.ghannam@xxxxxxx>
---

Notes:
Link:
https://lore.kernel.org/r/20250213-wip-mca-updates-v2-7-3636547fe05f@xxxxxxx

v2->v3:
* Add tags from Qiuxu and Tony.

v1->v2:
* New in v2.

 arch/x86/include/asm/mce.h     |  2 ++
 arch/x86/kernel/cpu/common.c   |  1 +
 arch/x86/kernel/cpu/mce/amd.c  |  3 ---
 arch/x86/kernel/cpu/mce/core.c | 29 ++++++++++++++++++++++-------
 4 files changed, 25 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 3224f3862dc8..0108f69ec46a 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -241,12 +241,14 @@ struct cper_ia_proc_ctx;

 #ifdef CONFIG_X86_MCE
 int mcheck_init(void);
+void cpu_mca_init(struct cpuinfo_x86 *c);
 void mcheck_cpu_init(struct cpuinfo_x86 *c);
 void mcheck_cpu_clear(struct cpuinfo_x86 *c);
 int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
			       u64 lapic_id);
 #else
 static inline int mcheck_init(void) { return 0; }
+static inline void cpu_mca_init(struct cpuinfo_x86 *c) {}
 static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
 static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {}
 static inline int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 079ded4eeb86..8e3e51281f12 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1690,6 +1690,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 		setup_clear_cpu_cap(X86_FEATURE_LA57);
 
 	detect_nopl();
+	cpu_mca_init(c);
 }
 
 void __init init_cpu_devs(void)
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index aa23139a3092..206973d7dbcc 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -656,9 +656,6 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 	u32 low = 0, high = 0, address = 0;
 	int offset = -1;
 
-	mce_flags.overflow_recov = cpu_feature_enabled(X86_FEATURE_OVERFLOW_RECOV);
-	mce_flags.succor = cpu_feature_enabled(X86_FEATURE_SUCCOR);
-	mce_flags.smca = cpu_feature_enabled(X86_FEATURE_SMCA);
 	mce_flags.amd_threshold = 1;
 
 	for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 331cd8984395..d0a29e22cab0 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1832,13 +1832,6 @@ static void __mcheck_cpu_cap_init(void)
 	this_cpu_write(mce_num_banks, b);
 
 	__mcheck_cpu_mce_banks_init();
-
-	/* Use accurate RIP reporting if available. */
-	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
-		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
-
-	if (cap & MCG_SER_P)
-		mca_cfg.ser = 1;
 }
 
 static void __mcheck_cpu_init_generic(void)
@@ -2238,6 +2231,28 @@ DEFINE_IDTENTRY_RAW(exc_machine_check)
 }
 #endif
 
+/* Called only on the boot CPU. */
+void cpu_mca_init(struct cpuinfo_x86 *c)
+{
+	u64 cap;
+
+	if (!mce_available(c))
+		return;
+
+	mce_flags.overflow_recov = cpu_feature_enabled(X86_FEATURE_OVERFLOW_RECOV);
+	mce_flags.succor = cpu_feature_enabled(X86_FEATURE_SUCCOR);
+	mce_flags.smca = cpu_feature_enabled(X86_FEATURE_SMCA);
+
+	rdmsrl(MSR_IA32_MCG_CAP, cap);
+
+	/* Use accurate RIP reporting if available. */
+	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
+		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
+
+	if (cap & MCG_SER_P)
+		mca_cfg.ser = 1;
+}
+
 /*
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off:

--
2.49.0