Use the recently created mm/guest_memfd implementation. No functional
change intended.
Note: I've only compile-tested this; I'd appreciate some help from the SEV
folks to test it. Also note that mark_prepared takes on additional meaning
with SEV-SNP beyond the uptodate flag.
Signed-off-by: Elliot Berman <quic_eberman@xxxxxxxxxxx>
---
 arch/x86/kvm/svm/sev.c |   3 +-
 virt/kvm/Kconfig       |   1 +
 virt/kvm/guest_memfd.c | 371 ++++++++++---------------------------------------
 virt/kvm/kvm_main.c    |   2 -
 virt/kvm/kvm_mm.h      |   6 -
 5 files changed, 77 insertions(+), 306 deletions(-)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 714c517dd4b72..f3a6857270943 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2297,8 +2297,7 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
kunmap_local(vaddr);
}
- ret = rmp_make_private(pfn + i, gfn << PAGE_SHIFT, PG_LEVEL_4K,
- sev_get_asid(kvm), true);
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
+static inline struct kvm_gmem *inode_to_kvm_gmem(struct inode *inode)
+{
+ struct list_head *gmem_list = &inode->i_mapping->i_private_list;
+
+ return list_first_entry_or_null(gmem_list, struct kvm_gmem, entry);
+}
+
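For context, the new inode_to_kvm_gmem() helper pulls the first kvm_gmem
binding off the inode's i_private_list. A minimal usage sketch, assuming the
file-local struct kvm_gmem layout in virt/kvm/guest_memfd.c (with its 'kvm'
back-pointer); folio_to_kvm() below is a hypothetical caller, not part of
this patch:

	/* Hypothetical caller: resolve the owning VM from a gmem folio. */
	static struct kvm *folio_to_kvm(struct folio *folio)
	{
		struct inode *inode = folio_inode(folio);
		struct kvm_gmem *gmem = inode_to_kvm_gmem(inode);

		return gmem ? gmem->kvm : NULL;
	}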
-static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
- pgoff_t index, struct folio *folio)
+static int kvm_gmem_prepare_inaccessible(struct inode *inode, struct folio *folio)
{
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
- kvm_pfn_t pfn = folio_file_pfn(folio, index);
- gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff;
+ kvm_pfn_t pfn = folio_file_pfn(folio, 0);
+ gfn_t gfn = slot->base_gfn + folio_index(folio) - slot->gmem.pgoff;
int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));
if (rc) {
pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
@@ -42,67 +46,7 @@ static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slo
return 0;
}
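The prepare path above derives the pfn from the folio's first page and the
gfn from the folio's file index relative to the memslot's gmem binding. A
worked sketch of that mapping, assuming the slot->base_gfn and
slot->gmem.pgoff fields used elsewhere in virt/kvm/guest_memfd.c;
gmem_folio_to_gfn() is illustrative only:

	static gfn_t gmem_folio_to_gfn(struct kvm_memory_slot *slot,
				       struct folio *folio)
	{
		/* Page offset of the folio within the guest_memfd file. */
		pgoff_t index = folio_index(folio);

		/* The binding maps file offset 'pgoff' to guest frame 'base_gfn'. */
		return slot->base_gfn + index - slot->gmem.pgoff;
	}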
-static inline void kvm_gmem_mark_prepared(struct folio *folio)
-{
- folio_mark_uptodate(folio);
-}
-
-/*
- * Process @folio, which contains @gfn, so that the guest can use it.
- * The folio must be locked and the gfn must be contained in @slot.
- * On successful return the guest sees a zero page so as to avoid
- * leaking host data and the up-to-date flag is set.
- */
-static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
- gfn_t gfn, struct folio *folio)
-#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
-static void kvm_gmem_free_folio(struct folio *folio)
-{
- struct page *page = folio_page(folio, 0);
- kvm_pfn_t pfn = page_to_pfn(page);
- int order = folio_order(folio);
-
- kvm_arch_gmem_invalidate(pfn, pfn + (1ul << order));
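The removed free_folio hook told the architecture to invalidate the whole pfn
range backing the folio. A small sketch of that range computation, for
reference; gmem_folio_pfn_range() is illustrative and not part of either
implementation:

	static void gmem_folio_pfn_range(struct folio *folio,
					 kvm_pfn_t *start, kvm_pfn_t *end)
	{
		/* A folio of order N covers 2^N contiguous pfns. */
		*start = page_to_pfn(folio_page(folio, 0));
		*end = *start + (1ul << folio_order(folio));
	}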
@@ -656,19 +444,12 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
break;
}
- folio = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &is_prepared, &max_order);
+ folio = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, true, &max_order);
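For reference, the surrounding kvm_gmem_populate() loop handles the returned
folio roughly as below (based on the current upstream loop; the boolean
passed here is assumed to replace the old 'is_prepared' out-parameter):

	folio = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, true, &max_order);
	if (IS_ERR(folio)) {
		ret = PTR_ERR(folio);
		break;
	}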