[Part1 PATCH v4 16/17] X86/KVM: Unencrypt shared per-cpu variables when SEV is active

From: Brijesh Singh
Date: Sat Sep 16 2017 - 08:35:27 EST


When SEV is active, guest memory is encrypted with a guest-specific key, so a
guest memory region shared with the hypervisor must be mapped as unencrypted
before it is shared. Map the shared per-cpu variables (apf_reason, steal_time
and kvm_apic_eoi) as unencrypted, since the hypervisor accesses them directly.

Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: "Radim KrÄmÃÅ" <rkrcmar@xxxxxxxxxx>
Cc: Tom Lendacky <thomas.lendacky@xxxxxxx>
Cc: x86@xxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: kvm@xxxxxxxxxxxxxxx
Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
---
arch/x86/kernel/kvm.c | 46 +++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 43 insertions(+), 3 deletions(-)
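
Note for reviewers: the three converted variables are exactly the per-cpu
regions whose physical addresses the guest hands to the hypervisor through
MSR writes; after registration the host reads and writes those pages
directly, so if they stayed encrypted with the guest key the host's
plaintext updates would appear as ciphertext inside the guest. For
reference, a condensed sketch of the existing registration paths in this
file (kvm_register_steal_time() and kvm_guest_cpu_init(); feature checks
and the CONFIG_PREEMPT async-PF flag are omitted):

	/* steal time accounting: host updates steal_time in place */
	wrmsrl(MSR_KVM_STEAL_TIME,
	       slow_virt_to_phys(this_cpu_ptr(&steal_time)) | KVM_MSR_ENABLED);

	/* async page faults: host writes the fault reason to apf_reason */
	wrmsrl(MSR_KVM_ASYNC_PF_EN,
	       slow_virt_to_phys(this_cpu_ptr(&apf_reason)) | KVM_ASYNC_PF_ENABLED);

	/* paravirt EOI: host reads and clears the kvm_apic_eoi flag word */
	wrmsrl(MSR_KVM_PV_EOI_EN,
	       slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi)) | KVM_MSR_ENABLED);

This is why set_percpu_unencrypted() must run before kvm_guest_cpu_init()
in both the SMP and !SMP paths below.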

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 874827b0d7ca..9ccb48b027e4 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -75,8 +75,8 @@ static int parse_no_kvmclock_vsyscall(char *arg)
 
 early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
 
-static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
-static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
+static DEFINE_PER_CPU_UNENCRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
+static DEFINE_PER_CPU_UNENCRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
 static int has_steal_clock = 0;
 
 /*
@@ -305,7 +305,7 @@ static void kvm_register_steal_time(void)
 		cpu, (unsigned long long) slow_virt_to_phys(st));
 }
 
-static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
+static DEFINE_PER_CPU_UNENCRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
 
 static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
 {
@@ -419,9 +419,46 @@ void kvm_disable_steal_time(void)
 	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
 }
 
+static inline void __init __set_percpu_var_unencrypted(
+		void *var, int size)
+{
+	unsigned long pa = slow_virt_to_phys(var);
+
+	/* decrypt the memory in place */
+	sme_early_decrypt(pa, size);
+
+	/* clear the C-bit from the page table */
+	early_set_memory_decrypted(pa, size);
+}
+
+/*
+ * Iterate through all possible CPUs and map the memory regions pointed
+ * to by apf_reason, steal_time and kvm_apic_eoi as unencrypted at once.
+ *
+ * Note: we iterate through all possible CPUs so that CPUs hotplugged
+ * later will already have their per-cpu variables mapped as
+ * unencrypted.
+ */
+static void __init set_percpu_unencrypted(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		__set_percpu_var_unencrypted(&per_cpu(apf_reason, cpu),
+					     sizeof(struct kvm_vcpu_pv_apf_data));
+		__set_percpu_var_unencrypted(&per_cpu(steal_time, cpu),
+					     sizeof(struct kvm_steal_time));
+		__set_percpu_var_unencrypted(&per_cpu(kvm_apic_eoi, cpu),
+					     sizeof(unsigned long));
+	}
+}
+
 #ifdef CONFIG_SMP
 static void __init kvm_smp_prepare_boot_cpu(void)
 {
+	if (sev_active())
+		set_percpu_unencrypted();
+
 	kvm_guest_cpu_init();
 	native_smp_prepare_boot_cpu();
 	kvm_spinlock_init();
@@ -489,6 +526,9 @@ void __init kvm_guest_init(void)
 				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
 		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
 #else
+	if (sev_active())
+		set_percpu_unencrypted();
+
 	kvm_guest_cpu_init();
 #endif
 
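For context, DEFINE_PER_CPU_UNENCRYPTED() used above comes from an earlier
patch in this series; a rough sketch of its shape (the exact definition and
the matching linker-script section live in that patch, so treat this as
illustrative only):

	/*
	 * Sketch: place the variable in a dedicated per-cpu subsection so
	 * that unencrypted data is grouped together and the section can be
	 * page-aligned, allowing the C-bit to be cleared for its pages
	 * without affecting unrelated per-cpu data.
	 */
	#define DEFINE_PER_CPU_UNENCRYPTED(type, name) \
		DEFINE_PER_CPU_SECTION(type, name, "..unencrypted")
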
--
2.9.5