Re: [PATCH v5 5/6] x86/kvm: Add guest support for detecting and enabling SEV Live Migration feature.

From: Steve Rutherford
Date: Wed Aug 04 2021 - 19:09:11 EST


On Tue, Jun 29, 2021 at 8:12 AM Ashish Kalra <Ashish.Kalra@xxxxxxx> wrote:
>
> From: Ashish Kalra <ashish.kalra@xxxxxxx>
>
> The guest support for detecting and enabling the SEV Live Migration
> feature uses the following logic:
>
> - kvm_init_platform() checks if the guest is booted under EFI
>
> - If not EFI,
>
> i) If kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL), issue a wrmsrl()
> to enable SEV live migration support
>
> - If EFI,
>
> i) If kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL), read
> the UEFI variable which indicates OVMF support for live migration
>
> ii) If the variable indicates live migration is supported, issue a wrmsrl() to
> enable SEV live migration support
>
> The EFI live migration check is done using a late_initcall() callback.
>
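> In rough pseudocode (a condensed summary of the code in the patch
> below, not any new logic), the boot-time flow is:
>
>   if (sev_active() && kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) {
>           if (!efi_enabled(EFI_BOOT)) {
>                   /* Non-EFI boot: enable live migration right away. */
>                   wrmsrl(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY);
>           }
>           /*
>            * EFI boot: a late_initcall() reads the "SevLiveMigrationEnabled"
>            * UEFI variable and, if it reports support, issues the same
>            * wrmsrl().
>            */
>   }
>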
> Also, ensure that the _bss_decrypted section is marked as decrypted in
> the shared pages list.
>
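> As a rough illustration, each such page-status change is reported to the
> hypervisor with the KVM_HC_MAP_GPA_RANGE hypercall (this mirrors
> kvm_sev_hc_page_enc_status() in the patch below):
>
>   kvm_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages,
>                  KVM_MAP_GPA_RANGE_ENC_STAT(enc) |
>                  KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
>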
> v5 of this patch splits the guest kernel support for SEV live migration
> and kexec support for live migration into separate patches.
>
> Signed-off-by: Ashish Kalra <ashish.kalra@xxxxxxx>
> ---
> arch/x86/include/asm/mem_encrypt.h | 4 ++
> arch/x86/kernel/kvm.c | 82 ++++++++++++++++++++++++++++++
> arch/x86/mm/mem_encrypt.c | 5 ++
> 3 files changed, 91 insertions(+)
>
> diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
> index 9c80c68d75b5..8dd373cc8b66 100644
> --- a/arch/x86/include/asm/mem_encrypt.h
> +++ b/arch/x86/include/asm/mem_encrypt.h
> @@ -43,6 +43,8 @@ void __init sme_enable(struct boot_params *bp);
>
> int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
> int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
> +void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages,
> + bool enc);
>
> void __init mem_encrypt_free_decrypted_mem(void);
>
> @@ -83,6 +85,8 @@ static inline int __init
> early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; }
> static inline int __init
> early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
> +static inline void __init
> +early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) {}
>
> static inline void mem_encrypt_free_decrypted_mem(void) { }
>
> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index a26643dc6bd6..a014c9bb5066 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -27,6 +27,7 @@
> #include <linux/nmi.h>
> #include <linux/swait.h>
> #include <linux/syscore_ops.h>
> +#include <linux/efi.h>
> #include <asm/timer.h>
> #include <asm/cpu.h>
> #include <asm/traps.h>
> @@ -40,6 +41,7 @@
> #include <asm/ptrace.h>
> #include <asm/reboot.h>
> #include <asm/svm.h>
> +#include <asm/e820/api.h>
>
> DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
>
> @@ -433,6 +435,8 @@ static void kvm_guest_cpu_offline(bool shutdown)
> 	kvm_disable_steal_time();
> 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
> 		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
> +	if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
> +		wrmsrl(MSR_KVM_MIGRATION_CONTROL, 0);
> 	kvm_pv_disable_apf();
> 	if (!shutdown)
> 		apf_task_wake_all();
> @@ -547,6 +551,55 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
> 	__send_ipi_mask(local_mask, vector);
> }
>
> +static int __init setup_efi_kvm_sev_migration(void)
> +{
> +	efi_char16_t efi_sev_live_migration_enabled[] = L"SevLiveMigrationEnabled";
> +	efi_guid_t efi_variable_guid = AMD_SEV_MEM_ENCRYPT_GUID;
> +	efi_status_t status;
> +	unsigned long size;
> +	bool enabled;
> +
> +	if (!sev_active() ||
> +	    !kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
> +		return 0;
> +
> +	if (!efi_enabled(EFI_BOOT))
> +		return 0;
> +
> +	if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
> +		pr_info("%s : EFI runtime services are not enabled\n", __func__);
> +		return 0;
> +	}
> +
> +	size = sizeof(enabled);
> +
> +	/* Get variable contents into buffer */
> +	status = efi.get_variable(efi_sev_live_migration_enabled,
> +				  &efi_variable_guid, NULL, &size, &enabled);
> +
> +	if (status == EFI_NOT_FOUND) {
> +		pr_info("%s : EFI live migration variable not found\n", __func__);
> +		return 0;
> +	}
> +
> +	if (status != EFI_SUCCESS) {
> +		pr_info("%s : EFI variable retrieval failed\n", __func__);
> +		return 0;
> +	}
> +
> +	if (enabled == 0) {
> +		pr_info("%s: live migration disabled in EFI\n", __func__);
> +		return 0;
> +	}
> +
> +	pr_info("%s : live migration enabled in EFI\n", __func__);
> +	wrmsrl(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY);
> +
> +	return 1;
> +}
> +
> +late_initcall(setup_efi_kvm_sev_migration);
> +
> /*
> * Set the IPI entry points
> */
> @@ -805,8 +858,37 @@ static bool __init kvm_msi_ext_dest_id(void)
> 	return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID);
> }
>
> +static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc)
> +{
> +	kvm_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages,
> +		       KVM_MAP_GPA_RANGE_ENC_STAT(enc) | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
> +}
> +
> static void __init kvm_init_platform(void)
> {
> +	if (sev_active() &&
> +	    kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) {
> +		unsigned long nr_pages;
> +
> +		pv_ops.mmu.notify_page_enc_status_changed =
> +			kvm_sev_hc_page_enc_status;
> +
> +		/*
> +		 * Ensure that _bss_decrypted section is marked as decrypted in the
> +		 * shared pages list.
> +		 */
> +		nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted,
> +					PAGE_SIZE);
> +		early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
> +						nr_pages, 0);
> +
> +		/*
> +		 * If not booted using EFI, enable Live migration support.
> +		 */
> +		if (!efi_enabled(EFI_BOOT))
> +			wrmsrl(MSR_KVM_MIGRATION_CONTROL,
> +			       KVM_MIGRATION_READY);
> +	}
> 	kvmclock_init();
> 	x86_platform.apic_post_init = kvm_apic_init;
> }
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index 455ac487cb9d..2673a89d17d9 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -409,6 +409,11 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
> 	return early_set_memory_enc_dec(vaddr, size, true);
> }
>
> +void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
> +{
> +	notify_range_enc_status_changed(vaddr, npages, enc);
> +}
> +
> /*
> * SME and SEV are very similar but they are not the same, so there are
> * times that the kernel will need to distinguish between SME and SEV. The
> --
> 2.17.1
>

Reviewed-by: Steve Rutherford <srutherford@xxxxxxxxxx>