[RFC v2 24/26] KVM: x86/asi: Populate the KVM ASI page-table

From: Alexandre Chartre
Date: Thu Jul 11 2019 - 10:28:32 EST


Add mappings to the KVM ASI page-table so that KVM can run with its
address space isolation while keeping page faults to a minimum.

Signed-off-by: Alexandre Chartre <alexandre.chartre@xxxxxxxxxx>
---
arch/x86/kvm/vmx/isolation.c | 155 ++++++++++++++++++++++++++++++++++++++++-
arch/x86/kvm/vmx/vmx.c | 1 -
arch/x86/kvm/vmx/vmx.h | 3 +
3 files changed, 154 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/vmx/isolation.c b/arch/x86/kvm/vmx/isolation.c
index 644d8d3..d82f6b6 100644
--- a/arch/x86/kvm/vmx/isolation.c
+++ b/arch/x86/kvm/vmx/isolation.c
@@ -5,7 +5,7 @@
* KVM Address Space Isolation
*/

-#include <linux/module.h>
+#include <linux/kvm_host.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>
#include <asm/asi.h>
@@ -14,8 +14,11 @@
#include "vmx.h"
#include "x86.h"

-#define VMX_ASI_MAP_FLAGS \
- (ASI_MAP_STACK_CANARY | ASI_MAP_CPU_PTR | ASI_MAP_CURRENT_TASK)
+#define VMX_ASI_MAP_FLAGS (ASI_MAP_STACK_CANARY | \
+ ASI_MAP_CPU_PTR | \
+ ASI_MAP_CURRENT_TASK | \
+ ASI_MAP_RCU_DATA | \
+ ASI_MAP_CPU_HW_EVENTS)

/*
* When set to true, KVM #VMExit handlers run in isolated address space
@@ -34,9 +37,153 @@
static bool __read_mostly address_space_isolation;
module_param(address_space_isolation, bool, 0444);

+/*
+ * Map various kernel data.
+ */
+static int vmx_isolation_map_kernel_data(struct asi *asi)
+{
+ int err;
+
+ /* map context_tracking, used by guest_enter_irqoff() */
+ err = ASI_MAP_CPUVAR(asi, context_tracking);
+ if (err)
+ return err;
+
+ /* map irq_stat, used by kvm_*_cpu_l1tf_flush_l1d */
+ err = ASI_MAP_CPUVAR(asi, irq_stat);
+ if (err)
+ return err;
+ return 0;
+}
+
+/*
+ * Map kvm module and data from that module.
+ */
+static int vmx_isolation_map_kvm_data(struct asi *asi, struct kvm *kvm)
+{
+ int err;
+
+ /* map kvm module */
+ err = asi_map_module(asi, "kvm");
+ if (err)
+ return err;
+
+ err = asi_map_percpu(asi, kvm->srcu.sda,
+ sizeof(struct srcu_data));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Map kvm-intel module and generic x86 data.
+ */
+static int vmx_isolation_map_kvm_x86_data(struct asi *asi)
+{
+ int err;
+
+ /* map current module (kvm-intel) */
+ err = ASI_MAP_THIS_MODULE(asi);
+ if (err)
+ return err;
+
+ /* map current_vcpu, used by vcpu_enter_guest() */
+ err = ASI_MAP_CPUVAR(asi, current_vcpu);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Map vmx data.
+ */
+static int vmx_isolation_map_kvm_vmx_data(struct asi *asi, struct vcpu_vmx *vmx)
+{
+ struct kvm_vmx *kvm_vmx;
+ struct kvm_vcpu *vcpu;
+ struct kvm *kvm;
+ int err;
+
+ vcpu = &vmx->vcpu;
+ kvm = vcpu->kvm;
+ kvm_vmx = to_kvm_vmx(kvm);
+
+ /* map kvm_vmx (this also maps kvm) */
+ err = asi_map(asi, kvm_vmx, sizeof(*kvm_vmx));
+ if (err)
+ return err;
+
+ /* map vmx (this also maps vcpu) */
+ err = asi_map(asi, vmx, sizeof(*vmx));
+ if (err)
+ return err;
+
+ /* map vcpu data */
+ err = asi_map(asi, vcpu->run, PAGE_SIZE);
+ if (err)
+ return err;
+
+ err = asi_map(asi, vcpu->arch.apic, sizeof(struct kvm_lapic));
+ if (err)
+ return err;
+
+ /*
+ * Map additional vmx data.
+ */
+
+ if (vmx_l1d_flush_pages) {
+ err = asi_map(asi, vmx_l1d_flush_pages,
+ PAGE_SIZE << L1D_CACHE_ORDER);
+ if (err)
+ return err;
+ }
+
+ if (enable_pml) {
+ err = asi_map(asi, vmx->pml_pg, sizeof(struct page));
+ if (err)
+ return err;
+ }
+
+ err = asi_map(asi, vmx->guest_msrs, PAGE_SIZE);
+ if (err)
+ return err;
+
+ err = asi_map(asi, vmx->vmcs01.vmcs, PAGE_SIZE << vmcs_config.order);
+ if (err)
+ return err;
+
+ err = asi_map(asi, vmx->vmcs01.msr_bitmap, PAGE_SIZE);
+ if (err)
+ return err;
+
+ err = asi_map(asi, vmx->vcpu.arch.pio_data, PAGE_SIZE);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int vmx_isolation_init_mapping(struct asi *asi, struct vcpu_vmx *vmx)
{
- /* TODO: Populate the KVM ASI page-table */
+ int err;
+
+ err = vmx_isolation_map_kernel_data(asi);
+ if (err)
+ return err;
+
+ err = vmx_isolation_map_kvm_data(asi, vmx->vcpu.kvm);
+ if (err)
+ return err;
+
+ err = vmx_isolation_map_kvm_x86_data(asi);
+ if (err)
+ return err;
+
+ err = vmx_isolation_map_kvm_vmx_data(asi, vmx);
+ if (err)
+ return err;

return 0;
}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 9b92467..d47f093 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -201,7 +201,6 @@
[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
};

-#define L1D_CACHE_ORDER 4
void *vmx_l1d_flush_pages;

static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 09c1593..e8de23b 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -11,6 +11,9 @@
#include "ops.h"
#include "vmcs.h"

+#define L1D_CACHE_ORDER 4
+extern void *vmx_l1d_flush_pages;
+
extern const u32 vmx_msr_index[];
extern u64 host_efer;

--
1.7.1