[PATCH V2 kvm-next] KVM: guest_memfd: use kvm_gmem_get_index() in more places and other small cleanups

From: Shivank Garg
Date: Tue Sep 02 2025 - 04:04:50 EST


Move kvm_gmem_get_index() to the top of the file so it is available earlier,
and use it in kvm_gmem_prepare_folio() instead of open-coding the
gfn-to-index calculation.
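
For reference, the helper simply rebases a guest frame number against the
memslot's base gfn and adds the slot's starting page offset into the
guest_memfd file; __kvm_gmem_prepare_folio() computes the inverse. Below is a
minimal userspace sketch of that mapping, with toy_slot and
toy_gmem_get_index as illustrative stand-ins for the kernel structures (not
part of the patch):

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-in for the relevant kvm_memory_slot fields. */
struct toy_slot {
	unsigned long long base_gfn;	/* first gfn covered by the memslot */
	unsigned long long gmem_pgoff;	/* slot's starting page offset in the gmem file */
};

/* Same arithmetic as kvm_gmem_get_index(): index = gfn - base_gfn + pgoff. */
static unsigned long long toy_gmem_get_index(const struct toy_slot *slot,
					     unsigned long long gfn)
{
	return gfn - slot->base_gfn + slot->gmem_pgoff;
}

int main(void)
{
	struct toy_slot slot = { .base_gfn = 0x100, .gmem_pgoff = 0x10 };
	unsigned long long index = toy_gmem_get_index(&slot, 0x105);

	/* 0x105 - 0x100 + 0x10 == 0x15 */
	assert(index == 0x15);
	/* Inverse used by __kvm_gmem_prepare_folio(): gfn = base_gfn + index - pgoff. */
	assert(slot.base_gfn + index - slot.gmem_pgoff == 0x105);
	printf("gfn 0x105 maps to file index 0x%llx\n", index);
	return 0;
}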

Remove the redundant assignment to the gmem variable in __kvm_gmem_get_pfn();
it is already initialized earlier in the function.

Replace the magic number -1ul with ULONG_MAX in kvm_gmem_release().
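
The two spell the same value: -1 converted to unsigned long wraps to the
maximum value of the type, so this is purely a readability change. A trivial
standalone check (illustration only, not part of the patch):

#include <limits.h>

/* -1 converted to unsigned long is the maximum value of the type. */
_Static_assert(-1ul == ULONG_MAX, "-1ul and ULONG_MAX are the same value");

int main(void)
{
	return 0;
}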

No functional change intended.

Signed-off-by: Shivank Garg <shivankg@xxxxxxx>
---
Applies cleanly on kvm-next (a6ad54137) and guestmemfd-preview (3d23d4a27).

Changelog:
V2: Incorporate David's suggestions.
V1: https://lore.kernel.org/all/20250901051532.207874-3-shivankg@xxxxxxx


virt/kvm/guest_memfd.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index b2d6ad80f54c..1299e5e50844 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -44,6 +44,11 @@ static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index)
return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1));
}

+static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+ return gfn - slot->base_gfn + slot->gmem.pgoff;
+}
+
static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
pgoff_t index, struct folio *folio)
{
@@ -51,6 +56,7 @@ static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slo
kvm_pfn_t pfn = folio_file_pfn(folio, index);
gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff;
int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));
+
if (rc) {
pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
index, gfn, pfn, rc);
@@ -107,7 +113,7 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
* checked when creating memslots.
*/
WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, 1 << folio_order(folio)));
- index = gfn - slot->base_gfn + slot->gmem.pgoff;
+ index = kvm_gmem_get_index(slot, gfn);
index = ALIGN_DOWN(index, 1 << folio_order(folio));
r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
if (!r)
@@ -327,8 +333,8 @@ static int kvm_gmem_release(struct inode *inode, struct file *file)
* Zap all SPTEs pointed at by this file. Do not free the backing
* memory, as its lifetime is associated with the inode, not the file.
*/
- kvm_gmem_invalidate_begin(gmem, 0, -1ul);
- kvm_gmem_invalidate_end(gmem, 0, -1ul);
+ kvm_gmem_invalidate_begin(gmem, 0, ULONG_MAX);
+ kvm_gmem_invalidate_end(gmem, 0, ULONG_MAX);

list_del(&gmem->entry);

@@ -354,10 +360,6 @@ static inline struct file *kvm_gmem_get_file(struct kvm_memory_slot *slot)
return get_file_active(&slot->gmem.file);
}

-static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn)
-{
- return gfn - slot->base_gfn + slot->gmem.pgoff;
-}

static bool kvm_gmem_supports_mmap(struct inode *inode)
{
@@ -940,7 +942,6 @@ static struct folio *__kvm_gmem_get_pfn(struct file *file,
return ERR_PTR(-EFAULT);
}

- gmem = file->private_data;
if (xa_load(&gmem->bindings, index) != slot) {
WARN_ON_ONCE(xa_load(&gmem->bindings, index));
return ERR_PTR(-EIO);
--
2.43.0