[PATCH v2 5/7] KVM: VMX: Disallow PT MSR accesses if PT is not exposed to the guest

From: Xiaoyao Li
Date: Fri Aug 27 2021 - 03:03:31 EST


Per the SDM, accessing any of the PT MSRs triggers #GP if
X86_FEATURE_INTEL_PT is not available. Inject #GP on guest accesses to the
PT MSRs when Intel PT is not exposed to the guest via CPUID, while still
allowing host/userspace (host_initiated) accesses.

Signed-off-by: Xiaoyao Li <xiaoyao.li@xxxxxxxxx>
---
Changes in v2:
- allow userspace/host access regardless of PT bit (Sean)
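
For reference, the guest-visible effect can be illustrated with the sketch
below. It is illustrative only and not part of the patch: pt_msr_gp_demo()
is a made-up name, and it assumes the snippet runs inside the guest kernel
with the usual rdmsrl_safe()/boot_cpu_has() helpers available.

#include <linux/types.h>
#include <linux/bug.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>

/*
 * Illustration only, not part of this patch: in a guest whose CPUID does
 * not enumerate Intel PT, any PT MSR access now takes #GP, so an
 * exception-safe read via rdmsrl_safe() is expected to fail (the #GP is
 * fixed up into an error) rather than read back data.
 */
static void pt_msr_gp_demo(void)
{
	u64 ctl;

	if (!boot_cpu_has(X86_FEATURE_INTEL_PT))
		WARN_ON(rdmsrl_safe(MSR_IA32_RTIT_CTL, &ctl) == 0);
}

With Intel PT enumerated in the guest's CPUID (and KVM running with PT in
host/guest mode), the same read is expected to succeed as before.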
---
arch/x86/kvm/vmx/vmx.c | 38 +++++++++++++++++++++++++-------------
1 file changed, 25 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index b9d640029c40..394ef4732838 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1007,10 +1007,21 @@ static unsigned long segment_base(u16 selector)
 }
 #endif
 
-static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
+static inline bool pt_can_write_msr(struct vcpu_vmx *vmx,
+				    struct msr_data *msr_info)
 {
 	return vmx_pt_mode_is_host_guest() &&
-	       !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
+	       !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
+	       (msr_info->host_initiated ||
+		guest_cpuid_has(&vmx->vcpu, X86_FEATURE_INTEL_PT));
+}
+
+static inline bool pt_can_read_msr(struct kvm_vcpu *vcpu,
+				   struct msr_data *msr_info)
+{
+	return vmx_pt_mode_is_host_guest() &&
+	       (msr_info->host_initiated ||
+		guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT));
 }
 
 static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base)
@@ -1852,24 +1863,24 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 							&msr_info->data);
 		break;
 	case MSR_IA32_RTIT_CTL:
-		if (!vmx_pt_mode_is_host_guest())
+		if (!pt_can_read_msr(vcpu, msr_info))
 			return 1;
 		msr_info->data = vmx->pt_desc.guest.ctl;
 		break;
 	case MSR_IA32_RTIT_STATUS:
-		if (!vmx_pt_mode_is_host_guest())
+		if (!pt_can_read_msr(vcpu, msr_info))
 			return 1;
 		msr_info->data = vmx->pt_desc.guest.status;
 		break;
 	case MSR_IA32_RTIT_CR3_MATCH:
-		if (!vmx_pt_mode_is_host_guest() ||
+		if (!pt_can_read_msr(vcpu, msr_info) ||
 			!intel_pt_validate_cap(vmx->pt_desc.caps,
 						PT_CAP_cr3_filtering))
 			return 1;
 		msr_info->data = vmx->pt_desc.guest.cr3_match;
 		break;
 	case MSR_IA32_RTIT_OUTPUT_BASE:
-		if (!vmx_pt_mode_is_host_guest() ||
+		if (!pt_can_read_msr(vcpu, msr_info) ||
 			(!intel_pt_validate_cap(vmx->pt_desc.caps,
 					PT_CAP_topa_output) &&
 			 !intel_pt_validate_cap(vmx->pt_desc.caps,
@@ -1878,7 +1889,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = vmx->pt_desc.guest.output_base;
 		break;
 	case MSR_IA32_RTIT_OUTPUT_MASK:
-		if (!vmx_pt_mode_is_host_guest() ||
+		if (!pt_can_read_msr(vcpu, msr_info) ||
 			(!intel_pt_validate_cap(vmx->pt_desc.caps,
 					PT_CAP_topa_output) &&
 			 !intel_pt_validate_cap(vmx->pt_desc.caps,
@@ -1888,7 +1899,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
 		index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
-		if (!vmx_pt_mode_is_host_guest() ||
+		if (!pt_can_read_msr(vcpu, msr_info) ||
 			(index >= 2 * vmx->pt_desc.nr_addr_ranges))
 			return 1;
 		if (index % 2)
@@ -2156,6 +2167,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		return vmx_set_vmx_msr(vcpu, msr_index, data);
 	case MSR_IA32_RTIT_CTL:
 		if (!vmx_pt_mode_is_host_guest() ||
+			!guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT) ||
 			vmx_rtit_ctl_check(vcpu, data) ||
 			vmx->nested.vmxon)
 			return 1;
@@ -2164,14 +2176,14 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		pt_update_intercept_for_msr(vcpu);
 		break;
 	case MSR_IA32_RTIT_STATUS:
-		if (!pt_can_write_msr(vmx))
+		if (!pt_can_write_msr(vmx, msr_info))
 			return 1;
 		if (data & MSR_IA32_RTIT_STATUS_MASK)
 			return 1;
 		vmx->pt_desc.guest.status = data;
 		break;
 	case MSR_IA32_RTIT_CR3_MATCH:
-		if (!pt_can_write_msr(vmx))
+		if (!pt_can_write_msr(vmx, msr_info))
 			return 1;
 		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
 					   PT_CAP_cr3_filtering))
@@ -2179,7 +2191,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmx->pt_desc.guest.cr3_match = data;
 		break;
 	case MSR_IA32_RTIT_OUTPUT_BASE:
-		if (!pt_can_write_msr(vmx))
+		if (!pt_can_write_msr(vmx, msr_info))
 			return 1;
 		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
 					   PT_CAP_topa_output) &&
@@ -2191,7 +2203,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmx->pt_desc.guest.output_base = data;
 		break;
 	case MSR_IA32_RTIT_OUTPUT_MASK:
-		if (!pt_can_write_msr(vmx))
+		if (!pt_can_write_msr(vmx, msr_info))
 			return 1;
 		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
 					   PT_CAP_topa_output) &&
@@ -2201,7 +2213,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmx->pt_desc.guest.output_mask = data;
 		break;
 	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
-		if (!pt_can_write_msr(vmx))
+		if (!pt_can_write_msr(vmx, msr_info))
 			return 1;
 		index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
 		if (index >= 2 * vmx->pt_desc.nr_addr_ranges)
--
2.27.0