[RFC PATCH 4/6] KVM: guest_memfd: Implement bmap inode operation

From: isaku . yamahata
Date: Wed Sep 13 2023 - 06:49:25 EST


From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>

To inject a memory failure, the physical address of the page is needed.
Implement the bmap() method to convert a file offset into a physical address.

Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
---
virt/kvm/Kconfig | 4 ++++
virt/kvm/guest_mem.c | 34 ++++++++++++++++++++++++++++++++++
2 files changed, 38 insertions(+)

diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 624df45baff0..eb008f0e7cc3 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -115,3 +115,7 @@ config KVM_GENERIC_PRIVATE_MEM

config HAVE_GENERIC_PRIVATE_MEM_HANDLE_ERROR
bool
+
+config KVM_GENERIC_PRIVATE_MEM_BMAP
+ depends on KVM_GENERIC_PRIVATE_MEM
+ bool
diff --git a/virt/kvm/guest_mem.c b/virt/kvm/guest_mem.c
index 3678287d7c9d..90dfdfab1f8c 100644
--- a/virt/kvm/guest_mem.c
+++ b/virt/kvm/guest_mem.c
@@ -355,12 +355,46 @@ static int kvm_gmem_error_page(struct address_space *mapping, struct page *page)
return MF_DELAYED;
}

+#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM_BMAP
+/*
+ * Map a guest_memfd page index to its pfn so callers (e.g. memory-failure
+ * injection) can derive the physical address.  Note: ->bmap() conventionally
+ * returns a block/sector number; here both @block and the return value are
+ * page-granular (page index in, pfn out).  Returns 0 when the index is
+ * beyond EOF or no folio is present.
+ */
+static sector_t kvm_gmem_bmap(struct address_space *mapping, sector_t block)
+{
+	struct folio *folio;
+	sector_t pfn = 0;
+
+	filemap_invalidate_lock_shared(mapping);
+
+	/* Use >=: an offset equal to i_size is already past the last byte. */
+	if (block << PAGE_SHIFT >= i_size_read(mapping->host))
+		goto out;
+
+	folio = filemap_get_folio(mapping, block);
+	if (IS_ERR_OR_NULL(folio))
+		goto out;
+
+	pfn = folio_pfn(folio) + (block - folio->index);
+	folio_put(folio);
+out:
+	filemap_invalidate_unlock_shared(mapping);
+	return pfn;
+}
+#endif
+
static const struct address_space_operations kvm_gmem_aops = {
.dirty_folio = noop_dirty_folio,
#ifdef CONFIG_MIGRATION
.migrate_folio = kvm_gmem_migrate_folio,
#endif
.error_remove_page = kvm_gmem_error_page,
+#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM_BMAP
+ .bmap = kvm_gmem_bmap,
+#endif
};

static int kvm_gmem_getattr(struct mnt_idmap *idmap,
--
2.25.1