[PATCH 07/18] KVM: TDX: Implement callbacks for MSR operations

From: Binbin Wu
Date: Mon Dec 09 2024 - 19:50:03 EST


From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>

Add functions to implement MSR-related callbacks, .set_msr(), .get_msr(),
and .has_emulated_msr(), in preparation for handling hypercalls from the
TDX guest for para-virtualized RDMSR and WRMSR. Ignore
KVM_REQ_MSR_FILTER_CHANGED for TDX.

There are three classes of MSR virtualization for TDX.
- Non-configurable: The TDX module directly virtualizes it. The VMM can't
  configure it; the value set by KVM_SET_MSRS is ignored.
- Configurable: The TDX module directly virtualizes it. The VMM can
  configure it at VM creation time. The value set by KVM_SET_MSRS is used.
- #VE case: The TDX guest issues TDG.VP.VMCALL<INSTRUCTION.{WRMSR,RDMSR}>
  and the VMM handles the MSR hypercall. The value set by KVM_SET_MSRS is
  used.
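
For illustration only (not part of this patch), a minimal userspace
sketch of setting one such MSR via KVM_SET_MSRS could look like the
snippet below; vcpu_fd and the choice of MSR_IA32_POWER_CTL are
assumptions made for the example, and whether the written value takes
effect depends on the class above.

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  #define MSR_IA32_POWER_CTL 0x1fc  /* one of the MSRs emulated below */

  static int set_one_msr(int vcpu_fd)
  {
          struct {
                  struct kvm_msrs hdr;
                  struct kvm_msr_entry entry;
          } msrs = {
                  .hdr.nmsrs = 1,
                  .entry = { .index = MSR_IA32_POWER_CTL, .data = 0 },
          };

          /* KVM_SET_MSRS returns the number of MSRs successfully set. */
          return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs) == 1 ? 0 : -1;
  }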

For the MSRs belonging to the #VE case, the TDX module injects a #VE into
the TDX guest upon RDMSR or WRMSR. The exact list of such MSRs is defined
in the TDX Module ABI Spec.

Upon #VE, the TDX guest may issue TDG.VP.VMCALL<INSTRUCTION.{WRMSR,RDMSR}>,
which are defined in the GHCI (Guest-Host Communication Interface), so that
the host VMM (e.g. KVM) can virtualize the MSRs.
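
For reference, the guest-side flow for a #VE-class MSR read is roughly
the sketch below; tdvmcall_rdmsr() is a hypothetical stand-in for the
guest's hypercall plumbing, with register usage per the GHCI definition
of TDG.VP.VMCALL<Instruction.RDMSR>.

  /* Illustrative sketch of a guest #VE handler path for RDMSR. */
  static int ve_handle_rdmsr(struct pt_regs *regs)
  {
          u64 value;

          /*
           * Per the GHCI, the guest passes the standard VMCALL leaf in
           * R10, EXIT_REASON_MSR_READ in R11 and the MSR index in R12;
           * the VMM returns the MSR value in the output registers.
           */
          if (tdvmcall_rdmsr(regs->cx, &value))   /* hypothetical helper */
                  return -EIO;                    /* VMM refused the MSR */

          regs->ax = (u32)value;
          regs->dx = (u32)(value >> 32);
          return 0;
  }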

TDX doesn't allow the VMM to configure interception of MSR accesses.
Ignore KVM_REQ_MSR_FILTER_CHANGED for TDX guests. If userspace has set any
MSR filters, they will be applied when handling
TDG.VP.VMCALL<INSTRUCTION.{WRMSR,RDMSR}> in a later patch.
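
Conceptually, the later handling of the RDMSR hypercall amounts to
something like the sketch below (a rough illustration, not the actual
later patch; the tdvmcall_*() accessors and TDVMCALL_* return codes are
placeholders). kvm_msr_allowed() is what honors any userspace MSR filter
on that path.

  /* Illustrative only -- not the actual TDVMCALL handler. */
  static int tdx_handle_rdmsr_tdvmcall(struct kvm_vcpu *vcpu)
  {
          u32 index = tdvmcall_a0_read(vcpu);     /* placeholder accessor */
          u64 data;

          /*
           * kvm_msr_allowed() consults any userspace-installed MSR
           * filter, so filtering is still honored even though TDX never
           * generates MSR-intercept VM exits.
           */
          if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ) ||
              kvm_get_msr(vcpu, index, &data)) {
                  tdvmcall_set_return_code(vcpu, TDVMCALL_INVALID_OPERAND);
                  return 1;
          }

          tdvmcall_set_return_val(vcpu, data);    /* placeholder accessor */
          tdvmcall_set_return_code(vcpu, TDVMCALL_SUCCESS);
          return 1;
  }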

Suggested-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
Co-developed-by: Binbin Wu <binbin.wu@xxxxxxxxxxxxxxx>
Signed-off-by: Binbin Wu <binbin.wu@xxxxxxxxxxxxxxx>
Reviewed-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
TDX "the rest" breakout:
- Renamed from "KVM: TDX: Implement callbacks for MSR operations for TDX"
to "KVM: TDX: Implement callbacks for MSR operations"
- Update changelog.
- Remove the @write parameter of tdx_has_emulated_msr(), and check whether
the MSR is read-only in tdx_set_msr(). (Sean)
https://lore.kernel.org/kvm/Zg1yPIV6cVJrwGxX@xxxxxxxxxx/
- Change the code to align with the pattern "make the happy path the
not-taken path". (Sean)
- Open code the handling for the emulated KVM MSR MSR_KVM_POLL_CONTROL,
and let the others go through the default statement. (Sean)
- Split the KVM_MAX_MCE_BANKS macro move into a separate patch. (Sean)
- Add comments in vt_msr_filter_changed().
- Add Suggested-by tag.
---
arch/x86/kvm/vmx/main.c | 50 +++++++++++++++++++++++++---
arch/x86/kvm/vmx/tdx.c | 67 ++++++++++++++++++++++++++++++++++++++
arch/x86/kvm/vmx/x86_ops.h | 6 ++++
3 files changed, 119 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index bfe848083eb9..a224e9d32701 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -191,6 +191,48 @@ static void vt_handle_exit_irqoff(struct kvm_vcpu *vcpu)
vmx_handle_exit_irqoff(vcpu);
}

+static int vt_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+ if (unlikely(is_td_vcpu(vcpu)))
+ return tdx_set_msr(vcpu, msr_info);
+
+ return vmx_set_msr(vcpu, msr_info);
+}
+
+/*
+ * The kvm parameter can be NULL (module initialization, or invocation before
+ * VM creation). Be sure to check the kvm parameter before using it.
+ */
+static bool vt_has_emulated_msr(struct kvm *kvm, u32 index)
+{
+ if (kvm && is_td(kvm))
+ return tdx_has_emulated_msr(index);
+
+ return vmx_has_emulated_msr(kvm, index);
+}
+
+static int vt_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+ if (unlikely(is_td_vcpu(vcpu)))
+ return tdx_get_msr(vcpu, msr_info);
+
+ return vmx_get_msr(vcpu, msr_info);
+}
+
+static void vt_msr_filter_changed(struct kvm_vcpu *vcpu)
+{
+ /*
+ * TDX doesn't allow VMM to configure interception of MSR accesses.
+ * TDX guest requests MSR accesses by calling TDVMCALL. The MSR
+ * filters will be applied when handling the TDVMCALL for RDMSR/WRMSR
+ * if the userspace has set any.
+ */
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_msr_filter_changed(vcpu);
+}
+
#ifdef CONFIG_KVM_SMM
static int vt_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
@@ -510,7 +552,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.disable_virtualization_cpu = vt_disable_virtualization_cpu,
.emergency_disable_virtualization_cpu = vmx_emergency_disable_virtualization_cpu,

- .has_emulated_msr = vmx_has_emulated_msr,
+ .has_emulated_msr = vt_has_emulated_msr,

.vm_size = sizeof(struct kvm_vmx),

@@ -529,8 +571,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {

.update_exception_bitmap = vmx_update_exception_bitmap,
.get_feature_msr = vmx_get_feature_msr,
- .get_msr = vmx_get_msr,
- .set_msr = vmx_set_msr,
+ .get_msr = vt_get_msr,
+ .set_msr = vt_set_msr,
.get_segment_base = vmx_get_segment_base,
.get_segment = vmx_get_segment,
.set_segment = vmx_set_segment,
@@ -637,7 +679,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.apic_init_signal_blocked = vt_apic_init_signal_blocked,
.migrate_timers = vmx_migrate_timers,

- .msr_filter_changed = vmx_msr_filter_changed,
+ .msr_filter_changed = vt_msr_filter_changed,
.complete_emulated_msr = kvm_complete_insn_gp,

.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 2b64652a0d05..770e3b847cd6 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1984,6 +1984,73 @@ void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
*error_code = 0;
}

+bool tdx_has_emulated_msr(u32 index)
+{
+ switch (index) {
+ case MSR_IA32_UCODE_REV:
+ case MSR_IA32_ARCH_CAPABILITIES:
+ case MSR_IA32_POWER_CTL:
+ case MSR_IA32_CR_PAT:
+ case MSR_IA32_TSC_DEADLINE:
+ case MSR_IA32_MISC_ENABLE:
+ case MSR_PLATFORM_INFO:
+ case MSR_MISC_FEATURES_ENABLES:
+ case MSR_IA32_APICBASE:
+ case MSR_EFER:
+ case MSR_IA32_MCG_CAP:
+ case MSR_IA32_MCG_STATUS:
+ case MSR_IA32_MCG_CTL:
+ case MSR_IA32_MCG_EXT_CTL:
+ case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
+ case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
+ /* MSR_IA32_MCx_{CTL, STATUS, ADDR, MISC, CTL2} */
+ case MSR_KVM_POLL_CONTROL:
+ return true;
+ case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
+ /*
+ * x2APIC registers that are virtualized by the CPU can't be
+ * emulated, KVM doesn't have access to the virtual APIC page.
+ */
+ switch (index) {
+ case X2APIC_MSR(APIC_TASKPRI):
+ case X2APIC_MSR(APIC_PROCPRI):
+ case X2APIC_MSR(APIC_EOI):
+ case X2APIC_MSR(APIC_ISR) ... X2APIC_MSR(APIC_ISR + APIC_ISR_NR):
+ case X2APIC_MSR(APIC_TMR) ... X2APIC_MSR(APIC_TMR + APIC_ISR_NR):
+ case X2APIC_MSR(APIC_IRR) ... X2APIC_MSR(APIC_IRR + APIC_ISR_NR):
+ return false;
+ default:
+ return true;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool tdx_is_read_only_msr(u32 index)
+{
+ return index == MSR_IA32_APICBASE || index == MSR_EFER;
+}
+
+int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+{
+ if (!tdx_has_emulated_msr(msr->index))
+ return 1;
+
+ return kvm_get_msr_common(vcpu, msr);
+}
+
+int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+{
+ if (tdx_is_read_only_msr(msr->index))
+ return 1;
+
+ if (!tdx_has_emulated_msr(msr->index))
+ return 1;
+
+ return kvm_set_msr_common(vcpu, msr);
+}
+
static int tdx_get_capabilities(struct kvm_tdx_cmd *cmd)
{
const struct tdx_sys_info_td_conf *td_conf = &tdx_sysinfo->td_conf;
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 28dbef77700c..7fb1bbf12b39 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -142,6 +142,9 @@ void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
void tdx_inject_nmi(struct kvm_vcpu *vcpu);
void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);
+bool tdx_has_emulated_msr(u32 index);
+int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
+int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);

@@ -186,6 +189,9 @@ static inline void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mo
static inline void tdx_inject_nmi(struct kvm_vcpu *vcpu) {}
static inline void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, u64 *info1,
u64 *info2, u32 *intr_info, u32 *error_code) {}
+static inline bool tdx_has_emulated_msr(u32 index) { return false; }
+static inline int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return 1; }
+static inline int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return 1; }

static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -EOPNOTSUPP; }

--
2.46.0