From: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
Pin the memory for the data being passed to launch_update_data()
because it gets encrypted before the guest is first run and must not
be moved afterwards; moving the encrypted pages would corrupt the
guest's memory.
Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
[ * Use kvm_for_each_memslot_in_hva_range() to find the slot and iterate.
  * Updated sev_pin_memory_in_mmu() error handling.
  * As pinning/unpinning pages is handled within the MMU, removed
    {get,put}_user(). ]
Signed-off-by: Nikunj A Dadhania <nikunj@xxxxxxx>
---
arch/x86/kvm/svm/sev.c | 146 +++++++++++++++++++++++++++++++++++++----
1 file changed, 134 insertions(+), 12 deletions(-)
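
A minimal sketch (illustrative only, not part of the diff below) of how
the launch_update_data() path is expected to consume the new helper; the
wrapper name and error handling here are assumptions for review context:

	static int sev_launch_update_data_sketch(struct kvm *kvm,
						 unsigned long vaddr,
						 unsigned long size)
	{
		unsigned long npages;
		struct page **pages;

		/* Fault in and pin the whole range through the MMU. */
		pages = sev_pin_memory_in_mmu(kvm, vaddr, size, &npages);
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		/* ... encrypt the pinned pages with SEV_CMD_LAUNCH_UPDATE_DATA ... */
		return 0;
	}
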
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 7e39320fc65d..1c371268934b 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -22,6 +22,7 @@
 #include <asm/trapnr.h>
 #include <asm/fpu/xcr.h>
 
+#include "mmu.h"
 #include "x86.h"
 #include "svm.h"
 #include "svm_ops.h"
@@ -428,9 +429,93 @@ static void *sev_alloc_pages(struct kvm_sev_info *sev, unsigned long uaddr,
 	return pages;
 }
 
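+/*
+ * Page-fault error codes handed to kvm_mmu_map_tdp_page(): user-mode
+ * read faults for read-only memslots, user-mode write faults otherwise.
+ */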
+#define SEV_PFERR_RO (PFERR_USER_MASK)
+#define SEV_PFERR_RW (PFERR_WRITE_MASK | PFERR_USER_MASK)
+
+static struct page **sev_pin_memory_in_mmu(struct kvm *kvm, unsigned long addr,
+					   unsigned long size,
+					   unsigned long *npages)
+{
+	unsigned long hva_start, hva_end, uaddr, end, slot_start, slot_end;
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct interval_tree_node *node;
+	struct kvm_memory_slot *slot;
+	struct kvm_memslots *slots;
+	int idx, ret = 0, i = 0;
+	struct kvm_vcpu *vcpu;
+	struct page **pages;
+	kvm_pfn_t pfn;
+	u32 err_code;
+	gfn_t gfn;
+
+	pages = sev_alloc_pages(sev, addr, size, npages);
+	if (IS_ERR(pages))
+		return pages;
+
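+	/*
+	 * Pin through vCPU 0's MMU context: hold its mutex and load its MMU
+	 * so the pages can be faulted into the TDP page tables below.
+	 */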
+	vcpu = kvm_get_vcpu(kvm, 0);
+	if (mutex_lock_killable(&vcpu->mutex)) {
+		kvfree(pages);
+		return ERR_PTR(-EINTR);
+	}
+
+	vcpu_load(vcpu);
+	idx = srcu_read_lock(&kvm->srcu);
+
+	kvm_mmu_load(vcpu);
+
+	end = addr + (*npages << PAGE_SHIFT);
+	slots = kvm_memslots(kvm);
+
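+	/* Walk every memslot that overlaps [addr, end) and map it page by page. */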
+	kvm_for_each_memslot_in_hva_range(node, slots, addr, end) {
+		slot = container_of(node, struct kvm_memory_slot,
+				    hva_node[slots->node_idx]);
+		slot_start = slot->userspace_addr;
+		slot_end = slot_start + (slot->npages << PAGE_SHIFT);
+		hva_start = max(addr, slot_start);
+		hva_end = min(end, slot_end);
+
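+		/* Pages in a read-only memslot must not be mapped writable. */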
+		err_code = (slot->flags & KVM_MEM_READONLY) ?
+			   SEV_PFERR_RO : SEV_PFERR_RW;
+
+		for (uaddr = hva_start; uaddr < hva_end; uaddr += PAGE_SIZE) {
+			if (signal_pending(current)) {
+				ret = -ERESTARTSYS;
+				break;
+			}
+
+			if (need_resched())
+				cond_resched();
+
+			/*
+			 * Fault in the page; sev_pin_page() will handle the
+			 * pinning.
+			 */
+			gfn = hva_to_gfn_memslot(uaddr, slot);
+			pfn = kvm_mmu_map_tdp_page(vcpu, gfn_to_gpa(gfn),
+						   err_code, PG_LEVEL_4K);
+			if (is_error_noslot_pfn(pfn)) {
+				ret = -EFAULT;
+				break;
+			}
+			pages[i++] = pfn_to_page(pfn);
+		}
+	}