[PATCH RFC v7 49/64] KVM: SVM: Introduce ops for the post gfn map and unmap

From: Michael Roth
Date: Wed Dec 14 2022 - 15:10:32 EST


From: Brijesh Singh <brijesh.singh@xxxxxxx>

When SEV-SNP is enabled in the guest VM, guest memory pages can be
either private or shared. A write from the hypervisor goes through
the RMP checks. If the CPU sees that the hypervisor is attempting to
write to a guest private page, it will trigger an RMP violation #PF.

To avoid RMP violations with GHCB pages, add new post_{map,unmap}_gfn
functions to verify whether it is safe to map GHCB pages. Use
kvm->mmu_lock to guard the GHCB against invalidations while it is
being accessed.

Generic post_{map,unmap}_gfn() ops are needed so that it can be
verified that it's safe to map a given guest page in the hypervisor.

Link: https://lore.kernel.org/all/CABpDEukAEGwb9w12enO=fhSbHbchypsOdO2dkR4Jei3wDW6NWg@xxxxxxxxxxxxxx/
Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
Signed-off-by: Jarkko Sakkinen <jarkko@xxxxxxxxxxx>
Signed-off-by: Ashish Kalra <ashish.kalra@xxxxxxx>
[mdr: use kvm->mmu_lock instead of a new spinlock, this should guard
GHCB page against invalidations]
Signed-off-by: Michael Roth <michael.roth@xxxxxxx>
---
arch/x86/kvm/svm/sev.c | 40 ++++++++++++++++++++++++++++++++++++++--
arch/x86/kvm/svm/svm.h | 3 +++
2 files changed, 41 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index abe6444bf5d4..90b509fe1826 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2926,19 +2926,28 @@ static inline int svm_map_ghcb(struct vcpu_svm *svm, struct kvm_host_map *map)
{
struct vmcb_control_area *control = &svm->vmcb->control;
u64 gfn = gpa_to_gfn(control->ghcb_gpa);
+ struct kvm_vcpu *vcpu = &svm->vcpu;

- if (kvm_vcpu_map(&svm->vcpu, gfn, map)) {
+ if (kvm_vcpu_map(vcpu, gfn, map)) {
/* Unable to map GHCB from guest */
pr_err("error mapping GHCB GFN [%#llx] from guest\n", gfn);
return -EFAULT;
}

+ if (sev_post_map_gfn(vcpu->kvm, map->gfn, map->pfn)) {
+ kvm_vcpu_unmap(vcpu, map, false);
+ return -EBUSY;
+ }
+
return 0;
}

static inline void svm_unmap_ghcb(struct vcpu_svm *svm, struct kvm_host_map *map)
{
- kvm_vcpu_unmap(&svm->vcpu, map, true);
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+
+ kvm_vcpu_unmap(vcpu, map, true);
+ sev_post_unmap_gfn(vcpu->kvm, map->gfn, map->pfn);
}

static void dump_ghcb(struct vcpu_svm *svm)
@@ -3875,6 +3884,33 @@ void sev_rmp_page_level_adjust(struct kvm *kvm, gfn_t gfn, int *level)
__func__, gfn, *level, rmp_level, ret);
}

+int sev_post_map_gfn(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn)
+{
+ int level;
+
+ if (!sev_snp_guest(kvm))
+ return 0;
+
+ read_lock(&(kvm)->mmu_lock);
+
+	/* Fail if the pfn is assigned as private in the RMP table - the GHCB must be a shared page */
+ if (snp_lookup_rmpentry(pfn, &level) == 1) {
+ read_unlock(&(kvm)->mmu_lock);
+ pr_err_ratelimited("failed to map private gfn 0x%llx pfn 0x%llx\n", gfn, pfn);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void sev_post_unmap_gfn(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn)
+{
+ if (!sev_snp_guest(kvm))
+ return;
+
+ read_unlock(&(kvm)->mmu_lock);
+}
+
int sev_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 error_code, bool *private_fault)
{
gfn_t gfn = gpa_to_gfn(gpa);
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index a4d48c3e0f89..aef13c120f2d 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -100,6 +100,7 @@ struct kvm_sev_info {
atomic_t migration_in_progress;
u64 snp_init_flags;
void *snp_context; /* SNP guest context page */
+ spinlock_t psc_lock;
};

struct kvm_svm {
@@ -727,6 +728,8 @@ void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);
struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
void sev_rmp_page_level_adjust(struct kvm *kvm, gfn_t gfn, int *level);
+int sev_post_map_gfn(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn);
+void sev_post_unmap_gfn(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn);

int sev_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 error_code, bool *private_fault);

--
2.25.1