Re: [PATCH v2 01/19] KVM: x86: Drop kvm_user_return_msr_cpu_online()

From: Sean Christopherson
Date: Thu Sep 01 2022 - 10:13:07 EST


On Thu, Sep 01, 2022, Chao Gao wrote:
> On Tue, Aug 30, 2022 at 05:01:16AM -0700, isaku.yamahata@xxxxxxxxx wrote:
> >From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
> >
> >KVM/X86 uses the user-return notifier to switch MSRs between guest and
> >user space: snapshot host values on CPU online, change MSR values for the
> >guest, and restore them on returning to user space. The current code abuses
> >kvm_arch_hardware_enable() which is called on kvm module initialization or
> >CPU online.
> >
> >Remove such abuse of kvm_arch_hardware_enable() by capturing the host
> >value on the first change of the MSR value for the guest VM instead of on
> >CPU online.
> >
> >Suggested-by: Sean Christopherson <seanjc@xxxxxxxxxx>
> >Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
> >---
> > arch/x86/kvm/x86.c | 43 ++++++++++++++++++++++++-------------------
> > 1 file changed, 24 insertions(+), 19 deletions(-)
> >
> >diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> >index 205ebdc2b11b..16104a2f7d8e 100644
> >--- a/arch/x86/kvm/x86.c
> >+++ b/arch/x86/kvm/x86.c
> >@@ -200,6 +200,7 @@ struct kvm_user_return_msrs {
> > struct kvm_user_return_msr_values {
> > u64 host;
> > u64 curr;
> >+ bool initialized;
> > } values[KVM_MAX_NR_USER_RETURN_MSRS];
>
> The benefit of having an "initialized" state for each user return MSR on
> each CPU is small. A per-cpu state looks suffice. With it, you can keep
> kvm_user_return_msr_cpu_online() and simply call the function from
> kvm_set_user_return_msr() if initialized is false on current CPU.

Yep, a per-CPU flag is what I intended. This is the completely untested patch
that's sitting in a development branch of mine.

---
arch/x86/kvm/x86.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eca76f187e4b..1328326acfae 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -194,6 +194,7 @@ module_param(eager_page_split, bool, 0644);

struct kvm_user_return_msrs {
struct user_return_notifier urn;
+ bool initialized;
bool registered;
struct kvm_user_return_msr_values {
u64 host;
@@ -400,18 +401,20 @@ int kvm_find_user_return_msr(u32 msr)
return -1;
}

-static void kvm_user_return_msr_cpu_online(void)
+static void kvm_user_return_msr_init_cpu(struct kvm_user_return_msrs *msrs)
{
- unsigned int cpu = smp_processor_id();
- struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
u64 value;
int i;

+ if (msrs->initialized)
+ return;
+
for (i = 0; i < kvm_nr_uret_msrs; ++i) {
rdmsrl_safe(kvm_uret_msrs_list[i], &value);
msrs->values[i].host = value;
msrs->values[i].curr = value;
}
+ msrs->initialized = true;
}

int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
@@ -420,6 +423,8 @@ int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
int err;

+ kvm_user_return_msr_init_cpu(msrs);
+
value = (value & mask) | (msrs->values[slot].host & ~mask);
if (value == msrs->values[slot].curr)
return 0;
@@ -11740,7 +11745,6 @@ int kvm_arch_hardware_enable(void)
u64 max_tsc = 0;
bool stable, backwards_tsc = false;

- kvm_user_return_msr_cpu_online();
ret = static_call(kvm_x86_hardware_enable)();
if (ret != 0)
return ret;

base-commit: a8f21d1980fbd7e877ed174142f7f572d547e611
--