[PATCH 2/6] KVM: VMX: Add new ops in kvm_x86_ops for LASS violation check
From: Zeng Guang
Date: Thu Apr 20 2023 - 10:16:30 EST
Intel introduces the LASS (Linear Address Space Separation) feature,
providing an independent mechanism to achieve mode-based protection.
LASS partitions the 64-bit linear address space into two halves, user-mode
addresses (LA[bit 63]=0) and supervisor-mode addresses (LA[bit 63]=1). It
stops any code execution or data access
1. from user mode to the supervisor-mode address space
2. from supervisor mode to the user-mode address space
and generates a LASS violation fault accordingly.
A supervisor-mode data access causes a LASS violation only if supervisor-mode
access protection is enabled (CR4.SMAP = 1) and either RFLAGS.AC = 0
or the access implicitly accesses a system data structure.
Following are the rules of the LASS violation check on a linear address (LA):
User access to supervisor-mode address space:
        LA[bit 63] && (CPL == 3)
Supervisor access to user-mode address space:
        Instruction fetch: !LA[bit 63] && (CPL < 3)
        Data access: !LA[bit 63] && (CR4.SMAP == 1) &&
                     ((RFLAGS.AC == 0 && CPL < 3) || Implicit supervisor access)
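Taken together, the rules above reduce to a single predicate, sketched below
(illustrative only; the helper and parameter names are not part of this
patch, and an implicit supervisor access is modeled as supervisor mode with
RFLAGS.AC treated as clear):

        bool lass_violation(bool user_mode, bool user_as, bool fetch,
                            bool smap_on, bool rflags_ac)
        {
                if (user_mode == user_as)
                        return false;   /* access stays in its own half */
                if (user_mode)
                        return true;    /* user access to supervisor space */
                if (fetch)
                        return true;    /* supervisor fetch from user space */
                /* supervisor data access to user space: SMAP/AC gated */
                return smap_on && !rflags_ac;
        }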
Add a new op, check_lass, in kvm_x86_ops to do the LASS violation check.
Signed-off-by: Zeng Guang <guang.zeng@xxxxxxxxx>
---
arch/x86/include/asm/kvm-x86-ops.h | 1 +
arch/x86/include/asm/kvm_host.h | 5 +++
arch/x86/kvm/vmx/vmx.c | 55 ++++++++++++++++++++++++++++++
arch/x86/kvm/vmx/vmx.h | 2 ++
4 files changed, 63 insertions(+)
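Note for context, not part of the diff: declaring the op with
KVM_X86_OP_OPTIONAL_RET0 wires it into KVM's static-call machinery with a
default stub that returns 0 (i.e. "no LASS violation") on vendors that do
not implement it, such as SVM. A consumer added later in the series would
presumably invoke it along these lines (a sketch only; the call site and
error handling are illustrative):

        if (static_call(kvm_x86_check_lass)(vcpu, access, la, 0)) {
                /* raise #GP(0), or #SS(0) for stack accesses */
        }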
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index abccd51dcfca..f76c07f2674b 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -131,6 +131,7 @@ KVM_X86_OP(msr_filter_changed)
KVM_X86_OP(complete_emulated_msr)
KVM_X86_OP(vcpu_deliver_sipi_vector)
KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
+KVM_X86_OP_OPTIONAL_RET0(check_lass);
#undef KVM_X86_OP
#undef KVM_X86_OP_OPTIONAL
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8ff89a52ef66..31fb8699a1ff 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -69,6 +69,9 @@
#define KVM_X86_NOTIFY_VMEXIT_VALID_BITS (KVM_X86_NOTIFY_VMEXIT_ENABLED | \
KVM_X86_NOTIFY_VMEXIT_USER)
+/* x86-specific emulation flags */
+#define KVM_X86_EMULFLAG_SKIP_LASS _BITULL(1)
+
/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1)
@@ -1706,6 +1709,8 @@ struct kvm_x86_ops {
* Returns vCPU specific APICv inhibit reasons
*/
unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
+
+	bool (*check_lass)(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags);
};
struct kvm_x86_nested_ops {
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c923d7599d71..581327ede66a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8070,6 +8070,59 @@ static void vmx_vm_destroy(struct kvm *kvm)
free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
}
+/*
+ * Determine whether an access to the given linear address causes a LASS
+ * violation. LASS protection is effective only in long mode. As a
+ * prerequisite, the caller must ensure the vCPU is running in long mode
+ * before invoking this helper.
+ */
+bool __vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags)
+{
+	bool user_mode, user_as, rflags_ac = false;
+
+	if ((flags & KVM_X86_EMULFLAG_SKIP_LASS) ||
+	    !kvm_is_cr4_bit_set(vcpu, X86_CR4_LASS))
+		return false;
+
+	WARN_ON_ONCE(!is_long_mode(vcpu));
+
+	user_as = !(la >> 63);
+
+	/*
+	 * An access is a supervisor-mode access if CPL < 3 or if it implicitly
+	 * accesses a system data structure. For an implicit access to a system
+	 * data structure, the processor acts as if RFLAGS.AC is clear, so
+	 * rflags_ac keeps its initial value of false.
+	 */
+	if (access & PFERR_IMPLICIT_ACCESS) {
+		user_mode = false;
+	} else {
+		user_mode = vmx_get_cpl(vcpu) == 3;
+		if (!user_mode)
+			rflags_ac = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
+	}
+
+	if (user_mode != user_as) {
+		/*
+		 * Supervisor-mode _data_ accesses to user address space
+		 * cause LASS violations only if SMAP is enabled.
+		 */
+		if (!user_mode && !(access & PFERR_FETCH_MASK))
+			return kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP) &&
+			       !rflags_ac;
+
+		return true;
+	}
+
+	return false;
+}
+
+static bool vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags)
+{
+	return is_long_mode(vcpu) && __vmx_check_lass(vcpu, access, la, flags);
+}
+
static struct kvm_x86_ops vmx_x86_ops __initdata = {
.name = "kvm_intel",
@@ -8207,6 +8260,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.complete_emulated_msr = kvm_complete_insn_gp,
.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
+
+	.check_lass = vmx_check_lass,
};
static unsigned int vmx_handle_intel_pt_intr(void)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index a3da84f4ea45..6569385a5978 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -433,6 +433,8 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
+bool __vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags);
+
static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
int type, bool value)
{
--
2.27.0
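As a usage note (based only on what this patch exposes): vmx_check_lass() is
the kvm_x86_ops entry point and folds in the long-mode check, while
__vmx_check_lass() is made visible through vmx.h so that VMX-internal code
which has already established long mode can skip the redundant check. A
hypothetical VMX-internal call site might look like (sketch only; "gva" and
the chosen fault are illustrative):

        if (__vmx_check_lass(vcpu, 0, gva, 0))
                kvm_inject_gp(vcpu, 0);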