[PATCH 4/7] mshv: Optimize memory region mapping operations

From: Stanislav Kinsburskii

Date: Wed Apr 01 2026 - 18:21:41 EST


Two specific operations don't require PFN iteration: region unmapping
and region remapping with no access. For unmapping, all frames in MSHV
memory regions are guaranteed to be mapped with page access, so we can
unmap them all without checking individual PFNs. For remapping with no
access, all frames are already mapped with page access, allowing us to
remap them all in one pass.

Since neither operation needs PFN validation, iterating over PFNs is
redundant. Batch operations into large page-aligned chunks followed by
remaining pages. This eliminates PFN traversal for these operations,
requires no additional hypercalls compared to the PFN-checking approach,
and provides the simplest possible sequential execution path.

The optimization utilizes HV_MAP_GPA_LARGE_PAGE and
HV_UNMAP_GPA_LARGE_PAGE flags for aligned portions, processing only the
remainder with base page granularity. This removes mshv_region_chunk_unmap()
and mshv_region_process_range() helper functions, reducing code complexity.

Signed-off-by: Stanislav Kinsburskii <skinsburskii@xxxxxxxxxxxxxxxxxxx>
---
drivers/hv/mshv_regions.c | 65 ++++++++++++++++++++++++++++++++-------------
1 file changed, 46 insertions(+), 19 deletions(-)

diff --git a/drivers/hv/mshv_regions.c b/drivers/hv/mshv_regions.c
index 2c4215381e0b..a92381219758 100644
--- a/drivers/hv/mshv_regions.c
+++ b/drivers/hv/mshv_regions.c
@@ -449,27 +449,27 @@ static int mshv_region_pin(struct mshv_region *region)
return ret < 0 ? ret : -ENOMEM;
}

-static int mshv_region_chunk_unmap(struct mshv_region *region,
- u32 flags,
- u64 pfn_offset, u64 pfn_count,
- bool huge_page)
+static int mshv_region_unmap(struct mshv_region *region)
{
- if (!pfn_valid(region->mreg_pfns[pfn_offset]))
- return 0;
+ u64 aligned_pages, remaining_pages;
+ int ret = 0;

- if (huge_page)
- flags |= HV_UNMAP_GPA_LARGE_PAGE;
+ aligned_pages = ALIGN_DOWN(region->nr_pfns, PTRS_PER_PMD);
+ remaining_pages = region->nr_pfns - aligned_pages;

- return hv_call_unmap_pfns(region->partition->pt_id,
- region->start_gfn + pfn_offset,
- pfn_count, flags);
-}
+ if (aligned_pages)
+ ret = hv_call_unmap_pfns(region->partition->pt_id,
+ region->start_gfn,
+ aligned_pages,
+ HV_UNMAP_GPA_LARGE_PAGE);

-static int mshv_region_unmap(struct mshv_region *region)
-{
- return mshv_region_process_range(region, 0,
- 0, region->nr_pfns,
- mshv_region_chunk_unmap);
+ if (!ret && remaining_pages)
+ ret = hv_call_unmap_pfns(region->partition->pt_id,
+ region->start_gfn + aligned_pages,
+ remaining_pages,
+ 0);
+
+ return ret;
}

static void mshv_region_destroy(struct kref *ref)
@@ -684,6 +684,34 @@ bool mshv_region_handle_gfn_fault(struct mshv_region *region, u64 gfn)
return !ret;
}

+static int mshv_region_map_no_access(struct mshv_region *region,
+ u64 pfn_offset, u64 pfn_count)
+{
+ u64 aligned_pages, remaining_pages;
+ int ret = 0;
+
+ aligned_pages = ALIGN_DOWN(pfn_count, PTRS_PER_PMD);
+ remaining_pages = pfn_count - aligned_pages;
+
+ if (aligned_pages)
+ ret = hv_call_map_ram_pfns(region->partition->pt_id,
+ region->start_gfn + pfn_offset,
+ aligned_pages,
+ HV_MAP_GPA_NO_ACCESS |
+ HV_MAP_GPA_LARGE_PAGE,
+ NULL);
+
+ if (!ret && remaining_pages)
+ ret = hv_call_map_ram_pfns(region->partition->pt_id,
+ region->start_gfn +
+ aligned_pages + pfn_offset,
+ remaining_pages,
+ HV_MAP_GPA_NO_ACCESS,
+ NULL);
+
+ return ret;
+}
+
/**
* mshv_region_interval_invalidate - Invalidate a range of memory region
* @mni: Pointer to the mmu_interval_notifier structure
@@ -727,8 +755,7 @@ static bool mshv_region_interval_invalidate(struct mmu_interval_notifier *mni,

mmu_interval_set_seq(mni, cur_seq);

- ret = mshv_region_remap_pfns(region, HV_MAP_GPA_NO_ACCESS,
- pfn_offset, pfn_count);
+ ret = mshv_region_map_no_access(region, pfn_offset, pfn_count);
if (ret)
goto out_unlock;