[RFC Part2 PATCH v3 14/26] KVM: SVM: VMRUN should use associated ASID when SEV is enabled
From: Brijesh Singh
Date: Mon Jul 24 2017 - 16:08:14 EST
SEV hardware uses ASIDs to associate a memory encryption key with a
guest VM. At guest creation time, the SEV_CMD_ACTIVATE command is used
to bind a particular ASID to the guest. Make sure the VMCB is
programmed with the bound ASID before a VMRUN.
Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
---
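For reviewers, a minimal userspace sketch of the flush decision this patch
makes before VMRUN is included below. Everything in it (vmcb_stub,
pre_sev_run_model, the two-CPU sev_vmcbs array, MAX_SEV_ASID) is a
simplified stand-in for illustration only, not the kernel symbols touched
by the diff; the per-CPU svm_cpu_data cache is modeled as a plain 2D array.

/*
 * Standalone model: track, per host CPU and per ASID, which VMCB ran
 * last, and request an ASID TLB flush whenever a different VMCB uses
 * the ASID on that CPU or the same VMCB moves to another CPU.
 * All names are illustrative, not kernel interfaces.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS_MODEL	2
#define MAX_SEV_ASID	16

struct vmcb_stub {
	unsigned int asid;	/* ASID programmed into the VMCB control area */
	bool flush_asid;	/* models TLB_CONTROL_FLUSH_ASID */
	unsigned int last_cpu;	/* host CPU used for the previous VMRUN */
};

/* One slot per (CPU, ASID): the VMCB that last ran with that ASID there. */
static struct vmcb_stub *sev_vmcbs[NR_CPUS_MODEL][MAX_SEV_ASID + 1];

static void pre_sev_run_model(struct vmcb_stub *vmcb, unsigned int cpu,
			      unsigned int asid)
{
	/* Always run with the ASID bound to this guest. */
	vmcb->asid = asid;

	/* Same VMCB, same CPU as last time: cached translations are valid. */
	if (sev_vmcbs[cpu][asid] == vmcb && vmcb->last_cpu == cpu)
		return;

	/* Another VMCB used this ASID here, or this VMCB changed CPUs. */
	vmcb->last_cpu = cpu;
	sev_vmcbs[cpu][asid] = vmcb;
	vmcb->flush_asid = true;
}

int main(void)
{
	struct vmcb_stub a = { 0 }, b = { 0 };

	pre_sev_run_model(&a, 0, 1);	/* first run: flush */
	printf("a: flush=%d\n", a.flush_asid);

	a.flush_asid = false;
	pre_sev_run_model(&a, 0, 1);	/* same VMCB and CPU: no flush */
	printf("a: flush=%d\n", a.flush_asid);

	pre_sev_run_model(&b, 0, 1);	/* different VMCB, same ASID: flush */
	printf("b: flush=%d\n", b.flush_asid);

	return 0;
}

Running the sketch prints flush=1, flush=0, flush=1, mirroring the two
flush conditions listed in the comment above pre_sev_run() in the diff.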
arch/x86/kvm/svm.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 49 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e99a572..72f7c27 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -213,6 +213,9 @@ struct vcpu_svm {
*/
struct list_head ir_list;
spinlock_t ir_list_lock;
+
+ /* which host CPU was last used to run this vCPU */
+ unsigned int last_cpuid;
};
/*
@@ -573,6 +576,8 @@ struct svm_cpu_data {
struct kvm_ldttss_desc *tss_desc;
struct page *save_area;
+
+ struct vmcb **sev_vmcbs; /* index = sev_asid, value = vmcb pointer */
};
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
@@ -886,6 +891,7 @@ static void svm_cpu_uninit(int cpu)
return;
per_cpu(svm_data, raw_smp_processor_id()) = NULL;
+ kfree(sd->sev_vmcbs);
__free_page(sd->save_area);
kfree(sd);
}
@@ -904,6 +910,14 @@ static int svm_cpu_init(int cpu)
if (!sd->save_area)
goto err_1;
+ if (svm_sev_enabled()) {
+ sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *),
+ GFP_KERNEL);
+ r = -ENOMEM;
+ if (!sd->sev_vmcbs)
+ goto err_1;
+ }
+
per_cpu(svm_data, cpu) = sd;
return 0;
@@ -4442,12 +4456,40 @@ static void reload_tss(struct kvm_vcpu *vcpu)
load_TR_desc();
}
+static void pre_sev_run(struct vcpu_svm *svm)
+{
+ int cpu = raw_smp_processor_id();
+ int asid = sev_get_asid(svm->vcpu.kvm);
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+
+ /* Assign the ASID allocated to this SEV guest */
+ svm->vmcb->control.asid = asid;
+
+ /*
+ * Flush guest TLB:
+ *
+ * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
+ * 2) this VMCB was executed on a different host CPU in previous VMRUNs.
+ */
+ if (sd->sev_vmcbs[asid] == svm->vmcb &&
+ svm->last_cpuid == cpu)
+ return;
+
+ svm->last_cpuid = cpu;
+ sd->sev_vmcbs[asid] = svm->vmcb;
+ svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
+ mark_dirty(svm->vmcb, VMCB_ASID);
+}
+
static void pre_svm_run(struct vcpu_svm *svm)
{
int cpu = raw_smp_processor_id();
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+ if (sev_guest(svm->vcpu.kvm))
+ return pre_sev_run(svm);
+
/* FIXME: handle wraparound of asid_generation */
if (svm->asid_generation != sd->asid_generation)
new_asid(svm, sd);
@@ -5523,10 +5565,16 @@ static int sev_asid_new(void)
static void sev_asid_free(int asid)
{
- int pos;
+ struct svm_cpu_data *sd;
+ int pos, cpu;
pos = asid - 1;
clear_bit(pos, sev_asid_bitmap);
+
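+ /*
+ * Drop the cached last-run VMCB for this ASID on every CPU so that a
+ * future guest reusing this ASID always gets a TLB flush on its first
+ * VMRUN there.
+ */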
+ for_each_possible_cpu(cpu) {
+ sd = per_cpu(svm_data, cpu);
+ sd->sev_vmcbs[asid] = NULL;
+ }
}
static int sev_firmware_init(int *error)
--
2.9.4