Re: [PATCH v19 083/130] KVM: TDX: Add TSX_CTRL msr into uret_msrs list

From: Binbin Wu
Date: Sun Apr 07 2024 - 03:05:38 EST

On 2/26/2024 4:26 PM, isaku.yamahata@xxxxxxxxx wrote:
From: Yang Weijiang <weijiang.yang@xxxxxxxxx>

The TDX module resets the TSX_CTRL MSR to 0 at TD exit if TSX is enabled
for the TD, and preserves the TSX_CTRL MSR if TSX is disabled for the TD.
The VMM can rely on the uret_msrs mechanism to defer reloading the host
value until exiting to user space.
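
Just to restate the mechanism in code form: the exit path only records what
the hardware now holds, and the actual WRMSR of the host value is deferred
to the user-return notifier. A minimal, purely illustrative sketch (the
struct and helper names below are made up for illustration, not the actual
KVM code):

/* Illustrative sketch only -- not the actual KVM implementation. */
struct uret_msr_state {
	u32 msr;		/* MSR index */
	u64 host_value;		/* value the host expects */
	u64 curr_value;		/* value currently in hardware */
};

/*
 * TD-exit path: the TDX module already wrote the MSR, so only record
 * what the hardware now holds instead of issuing a WRMSR here.
 */
static void uret_msr_update_cache(struct uret_msr_state *m, u64 hw_value)
{
	m->curr_value = hw_value;
}

/*
 * User-return notifier, i.e. the vCPU thread is about to return to
 * user space: reload the host value only if it actually changed.
 */
static void uret_msr_on_user_return(struct uret_msr_state *m)
{
	if (m->curr_value != m->host_value) {
		wrmsrl(m->msr, m->host_value);
		m->curr_value = m->host_value;
	}
}

With that picture, TSX_CTRL gets the same treatment as the other uret MSRs,
with the cached value forced to 0 when the TDX module is known to have
cleared it.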

Signed-off-by: Yang Weijiang <weijiang.yang@xxxxxxxxx>
Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
---
v19:
- fix the type of tdx_uret_tsx_ctrl_slot. unsigned int => int.
---
arch/x86/kvm/vmx/tdx.c | 33 +++++++++++++++++++++++++++++++--
arch/x86/kvm/vmx/tdx.h | 8 ++++++++
2 files changed, 39 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 7e2b1e554246..83dcaf5b6fbd 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -547,14 +547,21 @@ static struct tdx_uret_msr tdx_uret_msrs[] = {
{.msr = MSR_LSTAR,},
{.msr = MSR_TSC_AUX,},
};
+static int tdx_uret_tsx_ctrl_slot;
-static void tdx_user_return_update_cache(void)
+static void tdx_user_return_update_cache(struct kvm_vcpu *vcpu)
{
int i;
for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++)
kvm_user_return_update_cache(tdx_uret_msrs[i].slot,
tdx_uret_msrs[i].defval);
+ /*
+ * TSX_CTRL is reset to 0 if guest TSX is supported. Otherwise it
+ * is preserved.
+ */
+ if (to_kvm_tdx(vcpu->kvm)->tsx_supported && tdx_uret_tsx_ctrl_slot != -1)

If to_kvm_tdx(vcpu->kvm)->tsx_supported is true, tdx_uret_tsx_ctrl_slot shouldn't be -1 at this point;
otherwise it's a KVM bug, right?
Not sure whether this deserves a warning when tdx_uret_tsx_ctrl_slot is -1, or whether the check should just be dropped.
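
Something like the following is what I had in mind for the warning option
(untested, just a sketch on top of this patch; note KVM_BUG_ON() would also
mark the VM as bugged, which may or may not be desirable here):

	/*
	 * TSX_CTRL is reset to 0 if guest TSX is supported. Otherwise it
	 * is preserved. tsx_supported implies the slot was found during
	 * hardware setup, so a missing slot here would be a KVM bug.
	 */
	if (to_kvm_tdx(vcpu->kvm)->tsx_supported &&
	    !KVM_BUG_ON(tdx_uret_tsx_ctrl_slot == -1, vcpu->kvm))
		kvm_user_return_update_cache(tdx_uret_tsx_ctrl_slot, 0);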

+ kvm_user_return_update_cache(tdx_uret_tsx_ctrl_slot, 0);
}
static void tdx_restore_host_xsave_state(struct kvm_vcpu *vcpu)
@@ -649,7 +656,7 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu)
tdx_vcpu_enter_exit(tdx);
- tdx_user_return_update_cache();
+ tdx_user_return_update_cache(vcpu);
tdx_restore_host_xsave_state(vcpu);
tdx->host_state_need_restore = true;
@@ -1167,6 +1174,22 @@ static int setup_tdparams_xfam(struct kvm_cpuid2 *cpuid, struct td_params *td_pa
return 0;
}
+static bool tdparams_tsx_supported(struct kvm_cpuid2 *cpuid)
+{
+ const struct kvm_cpuid_entry2 *entry;
+ u64 mask;
+ u32 ebx;
+
+ entry = kvm_find_cpuid_entry2(cpuid->entries, cpuid->nent, 0x7, 0);
+ if (entry)
+ ebx = entry->ebx;
+ else
+ ebx = 0;
+
+ mask = __feature_bit(X86_FEATURE_HLE) | __feature_bit(X86_FEATURE_RTM);
+ return ebx & mask;
+}
+
static int setup_tdparams(struct kvm *kvm, struct td_params *td_params,
struct kvm_tdx_init_vm *init_vm)
{
@@ -1209,6 +1232,7 @@ static int setup_tdparams(struct kvm *kvm, struct td_params *td_params,
MEMCPY_SAME_SIZE(td_params->mrowner, init_vm->mrowner);
MEMCPY_SAME_SIZE(td_params->mrownerconfig, init_vm->mrownerconfig);
+ to_kvm_tdx(kvm)->tsx_supported = tdparams_tsx_supported(cpuid);
return 0;
}
@@ -2014,6 +2038,11 @@ int __init tdx_hardware_setup(struct kvm_x86_ops *x86_ops)
return -EIO;
}
}
+ tdx_uret_tsx_ctrl_slot = kvm_find_user_return_msr(MSR_IA32_TSX_CTRL);
+ if (tdx_uret_tsx_ctrl_slot == -1 && boot_cpu_has(X86_FEATURE_MSR_TSX_CTRL)) {
+ pr_err("MSR_IA32_TSX_CTRL isn't included by kvm_find_user_return_msr\n");
+ return -EIO;
+ }
max_pkgs = topology_max_packages();
tdx_mng_key_config_lock = kcalloc(max_pkgs, sizeof(*tdx_mng_key_config_lock),
diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
index e96c416e73bf..44eab734e702 100644
--- a/arch/x86/kvm/vmx/tdx.h
+++ b/arch/x86/kvm/vmx/tdx.h
@@ -17,6 +17,14 @@ struct kvm_tdx {
u64 xfam;
int hkid;
+ /*
+ * Used on each TD exit, see tdx_user_return_update_cache().
+ * TSX_CTRL value on TD exit:
+ * - set to 0 if guest TSX is enabled
+ * - preserved if guest TSX is disabled
+ */
+ bool tsx_supported;
+
bool finalized;
atomic_t tdh_mem_track;