[PATCH v5 01/15] x86/mtrr: split off physical address size calculation

From: Juergen Gross
Date: Sat Apr 01 2023 - 02:37:10 EST


Move the calculation of the physical address size in mtrr_bp_init()
into a helper function. This will be needed later.

This is a pure code movement; no optimization is done here, as that is split
off into a later patch.
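
For reference, a minimal standalone sketch (not part of the patch) of what the
SIZE_OR_MASK_BITS() macro used by the moved code evaluates to, assuming
PAGE_SHIFT == 12 as on x86; the small driver program is purely illustrative:

#include <stdio.h>

#define PAGE_SHIFT 12
#define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1))

int main(void)
{
	/* 36 physical address bits: mask covering page-frame bits 24 and up */
	printf("36 bits: 0x%016llx\n", SIZE_OR_MASK_BITS(36));
	/* 32 physical address bits: mask covering page-frame bits 20 and up */
	printf("32 bits: 0x%016llx\n", SIZE_OR_MASK_BITS(32));
	return 0;
}

With these inputs the masks come out as 0xffffffffff000000 (36 bits) and
0xfffffffffff00000 (32 bits) respectively.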

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Tested-by: Michael Kelley <mikelley@xxxxxxxxxxxxx>
---
V2:
- new patch
V3:
- only move code, split off optimizations (Boris Petkov)
---
arch/x86/kernel/cpu/mtrr/mtrr.c | 57 ++++++++++++++++++---------------
1 file changed, 32 insertions(+), 25 deletions(-)

diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 783f3210d582..8310bdb111d0 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -620,22 +620,14 @@ static struct syscore_ops mtrr_syscore_ops = {
int __initdata changed_by_mtrr_cleanup;

#define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
-/**
- * mtrr_bp_init - initialize mtrrs on the boot CPU
- *
- * This needs to be called early; before any of the other CPUs are
- * initialized (i.e. before smp_init()).
- *
- */
-void __init mtrr_bp_init(void)
+
+static unsigned int __init mtrr_calc_physbits(bool generic)
{
- const char *why = "(not available)";
- u32 phys_addr;
+ unsigned int phys_addr;

phys_addr = 32;

- if (boot_cpu_has(X86_FEATURE_MTRR)) {
- mtrr_if = &generic_mtrr_ops;
+ if (generic) {
size_or_mask = SIZE_OR_MASK_BITS(36);
size_and_mask = 0x00f00000;
phys_addr = 36;
@@ -667,29 +659,44 @@ void __init mtrr_bp_init(void)
size_and_mask = 0;
phys_addr = 32;
}
+ } else {
+ size_or_mask = SIZE_OR_MASK_BITS(32);
+ size_and_mask = 0;
+ }
+
+ return phys_addr;
+}
+
+/**
+ * mtrr_bp_init - initialize mtrrs on the boot CPU
+ *
+ * This needs to be called early; before any of the other CPUs are
+ * initialized (i.e. before smp_init()).
+ *
+ */
+void __init mtrr_bp_init(void)
+{
+ const char *why = "(not available)";
+ unsigned int phys_addr;
+
+ phys_addr = mtrr_calc_physbits(boot_cpu_has(X86_FEATURE_MTRR));
+
+ if (boot_cpu_has(X86_FEATURE_MTRR)) {
+ mtrr_if = &generic_mtrr_ops;
} else {
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
- if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
- /* Pre-Athlon (K6) AMD CPU MTRRs */
+ /* Pre-Athlon (K6) AMD CPU MTRRs */
+ if (cpu_feature_enabled(X86_FEATURE_K6_MTRR))
mtrr_if = &amd_mtrr_ops;
- size_or_mask = SIZE_OR_MASK_BITS(32);
- size_and_mask = 0;
- }
break;
case X86_VENDOR_CENTAUR:
- if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
+ if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR))
mtrr_if = &centaur_mtrr_ops;
- size_or_mask = SIZE_OR_MASK_BITS(32);
- size_and_mask = 0;
- }
break;
case X86_VENDOR_CYRIX:
- if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
+ if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR))
mtrr_if = &cyrix_mtrr_ops;
- size_or_mask = SIZE_OR_MASK_BITS(32);
- size_and_mask = 0;
- }
break;
default:
break;
--
2.35.3