kvm_mmu_invalidate_begin() is called unconditionally in
kvm_gmem_invalidate_begin(), but kvm_mmu_invalidate_end() is only called
from kvm_gmem_invalidate_end() when xa_find() sees a binding, so the two
can end up unbalanced (a sketch of one way to pair them up follows the
quoted hunks):
+static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
+				      pgoff_t end)
+{
+	struct kvm_memory_slot *slot;
+	struct kvm *kvm = gmem->kvm;
+	unsigned long index;
+	bool flush = false;
+
+	KVM_MMU_LOCK(kvm);
+
+	kvm_mmu_invalidate_begin(kvm);
+
+	xa_for_each_range(&gmem->bindings, index, slot, start, end - 1) {
+		pgoff_t pgoff = slot->gmem.pgoff;
+
+		struct kvm_gfn_range gfn_range = {
+			.start = slot->base_gfn + max(pgoff, start) - pgoff,
+			.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
+			.slot = slot,
+			.may_block = true,
+		};
+
+		flush |= kvm_mmu_unmap_gfn_range(kvm, &gfn_range);
+	}
+
+	if (flush)
+		kvm_flush_remote_tlbs(kvm);
+
+	KVM_MMU_UNLOCK(kvm);
+}
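
(Aside for other readers: the max()/min() clamping above converts the
punched range of file pages into the slot's gfn space. A worked example
with numbers I made up, assuming a slot bound at file page 16, 32 pages
long, with base_gfn 0x1000, and a hole punched over file pages [8, 24):

	pgoff = 16, base_gfn = 0x1000, npages = 32
	start = 8, end = 24

	gfn_range.start = 0x1000 + max(16, 8)  - 16 = 0x1000
	gfn_range.end   = 0x1000 + min(48, 24) - 16 = 0x1008

i.e. only the eight slot pages that actually overlap the hole are
unmapped.)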
+
+static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
+				    pgoff_t end)
+{
+	struct kvm *kvm = gmem->kvm;
+
+	KVM_MMU_LOCK(kvm);
+	if (xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT))
+		kvm_mmu_invalidate_end(kvm);
+	KVM_MMU_UNLOCK(kvm);
+}
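
Not prescribing the final shape, but here is a minimal sketch of one way
to keep the two sides paired: defer kvm_mmu_invalidate_begin() until the
walk actually finds a binding, mirroring the xa_find() check on the end
side. found_memslot is a name I made up for this sketch:

static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
				      pgoff_t end)
{
	bool flush = false, found_memslot = false;
	struct kvm_memory_slot *slot;
	struct kvm *kvm = gmem->kvm;
	unsigned long index;

	xa_for_each_range(&gmem->bindings, index, slot, start, end - 1) {
		pgoff_t pgoff = slot->gmem.pgoff;

		struct kvm_gfn_range gfn_range = {
			.start = slot->base_gfn + max(pgoff, start) - pgoff,
			.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
			.slot = slot,
			.may_block = true,
		};

		/* Only bump the invalidation count when there is work to do. */
		if (!found_memslot) {
			found_memslot = true;
			KVM_MMU_LOCK(kvm);
			kvm_mmu_invalidate_begin(kvm);
		}

		flush |= kvm_mmu_unmap_gfn_range(kvm, &gfn_range);
	}

	if (flush)
		kvm_flush_remote_tlbs(kvm);

	/* Unlock (and thus balance with _end()) only if we began. */
	if (found_memslot)
		KVM_MMU_UNLOCK(kvm);
}

With that, the unconditional lock+begin at the top goes away, and
kvm_gmem_invalidate_end() can stay as is: since the bindings are stable
under filemap_invalidate_lock(), its xa_find() check sees the same state
as the begin side.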
+
+static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+{
+	struct list_head *gmem_list = &inode->i_mapping->private_list;
+	pgoff_t start = offset >> PAGE_SHIFT;
+	pgoff_t end = (offset + len) >> PAGE_SHIFT;
+	struct kvm_gmem *gmem;
+
+	/*
+	 * Bindings must be stable across invalidation to ensure the start+end
+	 * are balanced.
+	 */
+	filemap_invalidate_lock(inode->i_mapping);
+
+	list_for_each_entry(gmem, gmem_list, entry) {
+		kvm_gmem_invalidate_begin(gmem, start, end);
+		kvm_gmem_invalidate_end(gmem, start, end);
+	}

Why loop over each gmem in gmem_list here? Can more than one kvm_gmem be
attached to a single inode?
+[...]
+	filemap_invalidate_unlock(inode->i_mapping);
+
+	return 0;
+}
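
One more thought on the "start+end are balanced" comment: for the
begin/end pair to actually guard the removal, I would expect the
truncation itself to sit between the two calls rather than having them
run back to back for each gmem. Roughly the shape below, where
truncate_inode_pages_range() is only my guess at what the elided [...]
does, not something taken from this patch:

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_begin(gmem, start, end);

	/* My assumed stand-in for the elided [...] above. */
	truncate_inode_pages_range(inode->i_mapping, offset,
				   offset + len - 1);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_end(gmem, start, end);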
+