[RFC PATCH V2 2/4] KVM: x86: exchange info about lazy_tscdeadline with msr
From: Wang Jianchao
Date: Mon Jul 10 2023 - 02:04:07 EST
The lazy tsc deadline does not work yet in this version; it only transmits
the physical address of the per-cpu data from the guest to the host side.
- Add the data structure on both the guest and host sides (a sketch of
  the assumed definitions follows below the "---").
- If the feature is enabled, set the lazy tscdeadline MSR when the guest
  CPU is initialized and clear it when the CPU is offlined.
- Add MSR set/get emulation code on the host side.
Signed-off-by: Li Shujin <arkinjob@xxxxxxxxxxx>
Signed-off-by: Wang Jianchao <jianchwa@xxxxxxxxxxx>
---
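For reference, a minimal sketch of the definitions this patch depends on,
which are presumably introduced earlier in the series. The feature bit,
MSR index and structure layout below are illustrative assumptions only,
not copied from that patch:

	/* uapi/asm/kvm_para.h (assumed location) */
	#define KVM_FEATURE_LAZY_TSCDEADLINE	17		/* hypothetical bit */
	#define MSR_KVM_LAZY_TSCDEADLINE	0x4b564dda	/* hypothetical index */

	/*
	 * Per-cpu area shared between guest and host.  The guest writes
	 * slow_virt_to_phys() of this area, ORed with KVM_MSR_ENABLED
	 * (bit 0), into MSR_KVM_LAZY_TSCDEADLINE; writing 0 disables it.
	 */
	struct kvm_lazy_tscdeadline {
		__u64 tscdeadline;	/* hypothetical field, unused in this patch */
	};
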
arch/x86/include/asm/kvm_host.h | 4 ++++
arch/x86/kernel/kvm.c | 13 +++++++++++++
arch/x86/kvm/x86.c | 13 +++++++++++++
3 files changed, 30 insertions(+)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index fb9d1f2..6edb1ac 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -944,6 +944,10 @@ struct kvm_vcpu_arch {
struct gfn_to_hva_cache data;
} pv_eoi;
+ struct {
+ u64 msr_val;
+ } lazy_tscdeadline;
+
u64 msr_kvm_poll_control;
/* set at EPT violation at this point */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 1cceac5..91eb333 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -67,6 +67,7 @@ early_param("no-steal-acc", parse_no_stealacc);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
+DEFINE_PER_CPU_DECRYPTED(struct kvm_lazy_tscdeadline, kvm_lazy_tscdeadline) __aligned(64) __visible;
static int has_steal_clock = 0;
static int has_guest_poll = 0;
@@ -379,6 +380,16 @@ static void kvm_guest_cpu_init(void)
if (has_steal_clock)
kvm_register_steal_time();
+
+ if (kvm_para_has_feature(KVM_FEATURE_LAZY_TSCDEADLINE)) {
+ struct kvm_lazy_tscdeadline *ptr = this_cpu_ptr(&kvm_lazy_tscdeadline);
+ unsigned long pa;
+
+ BUILD_BUG_ON(__alignof__(kvm_lazy_tscdeadline) < 4);
+ memset(ptr, 0, sizeof(*ptr));
+ pa = slow_virt_to_phys(ptr) | KVM_MSR_ENABLED;
+ wrmsrl(MSR_KVM_LAZY_TSCDEADLINE, pa);
+ }
}
static void kvm_pv_disable_apf(void)
@@ -452,6 +463,8 @@ static void kvm_guest_cpu_offline(bool shutdown)
if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
wrmsrl(MSR_KVM_MIGRATION_CONTROL, 0);
kvm_pv_disable_apf();
+ if (kvm_para_has_feature(KVM_FEATURE_LAZY_TSCDEADLINE))
+ wrmsrl(MSR_KVM_LAZY_TSCDEADLINE, 0);
if (!shutdown)
apf_task_wake_all();
kvmclock_disable();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 04b57a3..15c265a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1552,6 +1552,7 @@ static const u32 emulated_msrs_all[] = {
MSR_K7_HWCR,
MSR_KVM_POLL_CONTROL,
+ MSR_KVM_LAZY_TSCDEADLINE,
};
static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
@@ -3869,7 +3870,13 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu->arch.msr_kvm_poll_control = data;
break;
+ case MSR_KVM_LAZY_TSCDEADLINE:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_LAZY_TSCDEADLINE))
+ return 1;
+
+ vcpu->arch.lazy_tscdeadline.msr_val = data;
+ break;
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
@@ -4222,6 +4229,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vcpu->arch.msr_kvm_poll_control;
break;
+ case MSR_KVM_LAZY_TSCDEADLINE:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_LAZY_TSCDEADLINE))
+ return 1;
+
+ msr_info->data = vcpu->arch.lazy_tscdeadline.msr_val;
+ break;
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
case MSR_IA32_MCG_CAP:
--
2.7.4