[PATCH kernel v2 1/3] x86/amd: Cache values in percpu variables

From: Alexey Kardashevskiy
Date: Thu Dec 08 2022 - 23:39:32 EST


Reading the DR[0-3]_ADDR_MASK MSRs takes about 250 cycles, which is going
to be noticeable with the AMD KVM SEV-ES DebugSwap feature enabled:
KVM is going to store the host's DR[0-3] and DR[0-3]_ADDR_MASK before
switching to a guest, and the hardware is going to swap these on VMRUN
and VMEXIT.
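
For reference, the ~250 cycles figure can be reproduced with a rough
rdtsc-based loop; the throwaway module below is only a sketch, not part
of this patch (names, iteration count and the error-return trick are
arbitrary, and preemption/migration are ignored for a ballpark number):

/* Throwaway sketch: measure the cost of reading DR0_ADDR_MASK. */
#include <linux/module.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>

static int __init dr_mask_bench_init(void)
{
	unsigned long long t0, t1, val = 0;
	int i;

	if (!boot_cpu_has(X86_FEATURE_BPEXT))
		return -ENODEV;

	t0 = rdtsc_ordered();
	for (i = 0; i < 1000; i++)
		rdmsrl(MSR_F16H_DR0_ADDR_MASK, val);
	t1 = rdtsc_ordered();

	pr_info("~%llu cycles per DR0_ADDR_MASK read (last mask %#llx)\n",
		(t1 - t0) / 1000, val);

	return -ENODEV; /* nothing useful to keep loaded */
}
module_init(dr_mask_bench_init);

MODULE_LICENSE("GPL");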

Store the MSR values passed to set_dr_addr_mask() in percpu variables
(when they change) and return them via the new amd_get_dr_addr_mask().
The gain here is about 10x.
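
To illustrate the intended use of the new helper, here is a hypothetical
snippet (not part of this patch) of how a caller such as KVM could
snapshot the cached masks; the struct and function names are made up and
the actual SEV-ES DebugSwap wiring comes in the follow-up patches:

#include <linux/kernel.h>
#include <asm/debugreg.h>

/* Hypothetical container for host state to be restored later. */
struct host_dr_state {
	unsigned long dr_addr_mask[4];
};

/*
 * Expected to run with preemption disabled so the percpu cache read by
 * amd_get_dr_addr_mask() refers to the CPU about to enter the guest.
 */
static void snapshot_host_dr_addr_masks(struct host_dr_state *state)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(state->dr_addr_mask); i++)
		state->dr_addr_mask[i] = amd_get_dr_addr_mask(i);
}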

As set_dr_addr_mask() uses the array too, change the @dr type to
unsigned int to avoid having to check for @dr < 0.

While at it, replace deprecated boot_cpu_has() with cpu_feature_enabled()
in set_dr_addr_mask().

Signed-off-by: Alexey Kardashevskiy <aik@xxxxxxx>
---
Changes:
v2:
* reworked to use arrays
* set() skips wrmsr() when the mask is not changed
* added stub for get_dr_addr_mask()
* changed @dr type to unsigned
* s/boot_cpu_has/cpu_feature_enabled/
* added amd_ prefix
---
 arch/x86/include/asm/debugreg.h |  9 +++-
 arch/x86/kernel/cpu/amd.c       | 45 ++++++++++++++------
2 files changed, 38 insertions(+), 16 deletions(-)

diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index cfdf307ddc01..59f97ba25d5f 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -126,9 +126,14 @@ static __always_inline void local_db_restore(unsigned long dr7)
 }
 
 #ifdef CONFIG_CPU_SUP_AMD
-extern void set_dr_addr_mask(unsigned long mask, int dr);
+extern void set_dr_addr_mask(unsigned long mask, unsigned int dr);
+extern unsigned long amd_get_dr_addr_mask(unsigned int dr);
 #else
-static inline void set_dr_addr_mask(unsigned long mask, int dr) { }
+static inline void set_dr_addr_mask(unsigned long mask, unsigned int dr) { }
+static inline unsigned long amd_get_dr_addr_mask(unsigned int dr)
+{
+	return 0;
+}
 #endif
 
 #endif /* _ASM_X86_DEBUGREG_H */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c75d75b9f11a..9ac5a19f89b9 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1158,24 +1158,41 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 	return false;
 }
 
-void set_dr_addr_mask(unsigned long mask, int dr)
+DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);
+
+static unsigned int amd_msr_dr_addr_masks[] = {
+	MSR_F16H_DR0_ADDR_MASK,
+	MSR_F16H_DR1_ADDR_MASK,
+	MSR_F16H_DR1_ADDR_MASK + 1,
+	MSR_F16H_DR1_ADDR_MASK + 2
+};
+
+void set_dr_addr_mask(unsigned long mask, unsigned int dr)
 {
-	if (!boot_cpu_has(X86_FEATURE_BPEXT))
+	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
 		return;
 
-	switch (dr) {
-	case 0:
-		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
-		break;
-	case 1:
-	case 2:
-	case 3:
-		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
-		break;
-	default:
-		break;
-	}
+	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
+		return;
+
+	if (per_cpu(amd_dr_addr_mask, smp_processor_id())[dr] == mask)
+		return;
+
+	wrmsr(amd_msr_dr_addr_masks[dr], mask, 0);
+	per_cpu(amd_dr_addr_mask, smp_processor_id())[dr] = mask;
+}
+
+unsigned long amd_get_dr_addr_mask(unsigned int dr)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
+		return 0;
+
+	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
+		return 0;
+
+	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
 }
+EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);
 
 u32 amd_get_highest_perf(void)
 {
--
2.38.1