[PATCH v7 14/18] KVM: x86: Introduce new KVM_FEATURE_SEV_LIVE_MIGRATION feature & Custom MSR.
From: Ashish Kalra
Date: Thu Apr 30 2020 - 04:46:26 EST
From: Ashish Kalra <ashish.kalra@xxxxxxx>
Add a new KVM_FEATURE_SEV_LIVE_MIGRATION feature bit for the guest to
check for host-side SEV live migration support. Also add a new custom
MSR, MSR_KVM_SEV_LIVE_MIG_EN, with which the guest enables the SEV live
migration feature.
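For reference, a minimal guest-side sketch of the intended flow
(illustrative only, not part of this host patch; the helper name
sev_live_migration_enable() is made up here, while sev_active(),
kvm_para_has_feature() and wrmsrl() are existing kernel helpers):

	#include <asm/kvm_para.h>
	#include <asm/mem_encrypt.h>
	#include <asm/msr.h>

	static void sev_live_migration_enable(void)
	{
		/* Only act for an SEV guest whose host advertises the feature. */
		if (sev_active() &&
		    kvm_para_has_feature(KVM_FEATURE_SEV_LIVE_MIGRATION))
			wrmsrl(MSR_KVM_SEV_LIVE_MIG_EN,
			       KVM_SEV_LIVE_MIGRATION_ENABLED);
	}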
Signed-off-by: Ashish Kalra <ashish.kalra@xxxxxxx>
---
Documentation/virt/kvm/cpuid.rst | 5 +++++
Documentation/virt/kvm/msr.rst | 10 ++++++++++
arch/x86/include/uapi/asm/kvm_para.h | 5 +++++
arch/x86/kvm/svm/sev.c | 14 ++++++++++++++
arch/x86/kvm/svm/svm.c | 16 ++++++++++++++++
arch/x86/kvm/svm/svm.h | 2 ++
6 files changed, 52 insertions(+)
diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst
index 01b081f6e7ea..0514523e00cd 100644
--- a/Documentation/virt/kvm/cpuid.rst
+++ b/Documentation/virt/kvm/cpuid.rst
@@ -86,6 +86,11 @@ KVM_FEATURE_PV_SCHED_YIELD 13 guest checks this feature bit
before using paravirtualized
sched yield.
+KVM_FEATURE_SEV_LIVE_MIGRATION 14 guest checks this feature bit before
+ using the page encryption state
+ hypercall to notify page encryption
+ state changes
+
KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24 host will warn if no guest-side
per-cpu warps are expected in
kvmclock
diff --git a/Documentation/virt/kvm/msr.rst b/Documentation/virt/kvm/msr.rst
index 33892036672d..7cd7786bbb03 100644
--- a/Documentation/virt/kvm/msr.rst
+++ b/Documentation/virt/kvm/msr.rst
@@ -319,3 +319,13 @@ data:
KVM guests can request the host not to poll on HLT, for example if
they are performing polling themselves.
+
+MSR_KVM_SEV_LIVE_MIG_EN:
+ 0x4b564d06
+
+ Control SEV Live Migration features.
+
+data:
+ Bit 0 enables (1) or disables (0) the host-side SEV Live Migration feature.
+ Bit 1 enables (1) or disables (0) support for SEV Live Migration extensions.
+ All other bits are reserved.
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 2a8e0b6b9805..d9d4953b42ad 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -31,6 +31,7 @@
#define KVM_FEATURE_PV_SEND_IPI 11
#define KVM_FEATURE_POLL_CONTROL 12
#define KVM_FEATURE_PV_SCHED_YIELD 13
+#define KVM_FEATURE_SEV_LIVE_MIGRATION 14
#define KVM_HINTS_REALTIME 0
@@ -50,6 +51,7 @@
#define MSR_KVM_STEAL_TIME 0x4b564d03
#define MSR_KVM_PV_EOI_EN 0x4b564d04
#define MSR_KVM_POLL_CONTROL 0x4b564d05
+#define MSR_KVM_SEV_LIVE_MIG_EN 0x4b564d06
struct kvm_steal_time {
__u64 steal;
@@ -122,4 +124,7 @@ struct kvm_vcpu_pv_apf_data {
#define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
#define KVM_PV_EOI_DISABLED 0x0
+#define KVM_SEV_LIVE_MIGRATION_ENABLED (1 << 0)
+#define KVM_SEV_LIVE_MIGRATION_EXTENSIONS_SUPPORTED (1 << 1)
+
#endif /* _UAPI_ASM_X86_KVM_PARA_H */
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index ba5ecd1de644..0ac82e4aac33 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1469,6 +1469,17 @@ int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
return 0;
}
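+/*
+ * Invoked from svm_set_msr() when the guest writes MSR_KVM_SEV_LIVE_MIG_EN:
+ * record that the guest has opted in to SEV live migration so that the
+ * page encryption bitmap interfaces can be used.
+ */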
+void sev_update_migration_flags(struct kvm *kvm, u64 data)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+ if (!sev_guest(kvm))
+ return;
+
+ if (data & KVM_SEV_LIVE_MIGRATION_ENABLED)
+ sev->live_migration_enabled = true;
+}
+
int svm_get_page_enc_bitmap(struct kvm *kvm,
struct kvm_page_enc_bitmap *bmap)
{
@@ -1481,6 +1492,9 @@ int svm_get_page_enc_bitmap(struct kvm *kvm,
if (!sev_guest(kvm))
return -ENOTTY;
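+ /* The page encryption bitmap is meaningful only after the guest opts in. */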
+ if (!sev->live_migration_enabled)
+ return -EINVAL;
+
gfn_start = bmap->start_gfn;
gfn_end = gfn_start + bmap->num_pages;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 442adbbb0641..a99f5457f244 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2633,6 +2633,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
svm->msr_decfg = data;
break;
}
+ case MSR_KVM_SEV_LIVE_MIG_EN:
+ sev_update_migration_flags(vcpu->kvm, data);
+ break;
case MSR_IA32_APICBASE:
if (kvm_vcpu_apicv_active(vcpu))
avic_update_vapic_bar(to_svm(vcpu), data);
@@ -3493,6 +3496,19 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
+ /*
+ * If this is an SEV guest, advertise the SEV live migration
+ * feature to it via KVM_CPUID_FEATURES.
+ */
+ if (sev_guest(vcpu->kvm)) {
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
+ if (!best)
+ return;
+
+ best->eax |= (1 << KVM_FEATURE_SEV_LIVE_MIGRATION);
+ }
+
if (!kvm_vcpu_apicv_active(vcpu))
return;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index fd99e0a5417a..77f132a6fead 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -65,6 +65,7 @@ struct kvm_sev_info {
int fd; /* SEV device fd */
unsigned long pages_locked; /* Number of pages locked */
struct list_head regions_list; /* List of registered regions */
+ bool live_migration_enabled;
unsigned long *page_enc_bmap;
unsigned long page_enc_bmap_size;
};
@@ -494,5 +495,6 @@ int svm_unregister_enc_region(struct kvm *kvm,
void pre_sev_run(struct vcpu_svm *svm, int cpu);
int __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
+void sev_update_migration_flags(struct kvm *kvm, u64 data);
#endif
--
2.17.1