RE: [PATCH 2/7] mshv: Add support to address range holes remapping

From: Michael Kelley

Date: Mon Apr 13 2026 - 17:08:39 EST


From: Stanislav Kinsburskii <skinsburskii@xxxxxxxxxxxxxxxxxxx> Sent: Monday, March 30, 2026 1:04 PM
>
> Consolidate memory region processing to handle both valid and invalid PFNs
> uniformly. This eliminates code duplication across remap, unmap, share, and
> unshare operations by using a common range processing interface.
>
> Holes are now remapped with no-access permissions to enable
> hypervisor dirty page tracking for precopy live migration.
>
> This refactoring is a precursor to an upcoming change that will map
> present pages in movable regions upon region creation, requiring
> consistent handling of both mapped and unmapped ranges.
>
> Signed-off-by: Stanislav Kinsburskii <skinsburskii@xxxxxxxxxxxxxxxxxxx>
> ---
> drivers/hv/mshv_regions.c | 108 ++++++++++++++++++++++++++++++++++++++++-----
> 1 file changed, 95 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/hv/mshv_regions.c b/drivers/hv/mshv_regions.c
> index b1a707d16c07..ed9c55841140 100644
> --- a/drivers/hv/mshv_regions.c
> +++ b/drivers/hv/mshv_regions.c
> @@ -119,6 +119,57 @@ static long mshv_region_process_pfns(struct mshv_mem_region *region,
>  	return count;
>  }
>
> +/**
> + * mshv_region_process_hole - Handle a hole (invalid PFNs) in a memory region
> + * @region    : Memory region containing the hole
> + * @flags     : Flags to pass to the handler function
> + * @pfn_offset: Starting PFN offset within the region
> + * @pfn_count : Number of PFNs in the hole
> + * @handler   : Callback function to invoke for the hole
> + *
> + * Invokes the handler function for a contiguous hole with the specified
> + * parameters.
> + *
> + * Return: Number of PFNs handled, or negative error code.
> + */
> +static long mshv_region_process_hole(struct mshv_mem_region *region,
> +				     u32 flags,
> +				     u64 pfn_offset, u64 pfn_count,
> +				     int (*handler)(struct mshv_mem_region *region,
> +						    u32 flags,
> +						    u64 pfn_offset,
> +						    u64 pfn_count,
> +						    bool huge_page))
> +{
> +	long ret;
> +
> +	ret = handler(region, flags, pfn_offset, pfn_count, 0);
> +	if (ret)
> +		return ret;
> +
> +	return pfn_count;
> +}
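
Nit: huge_page is a bool, so the handler call above would read better
with "false" than "0":

	ret = handler(region, flags, pfn_offset, pfn_count, false);
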
> +
> +static long mshv_region_process_chunk(struct mshv_mem_region *region,
> +				      u32 flags,
> +				      u64 pfn_offset, u64 pfn_count,
> +				      int (*handler)(struct mshv_mem_region *region,
> +						     u32 flags,
> +						     u64 pfn_offset,
> +						     u64 pfn_count,
> +						     bool huge_page))
> +{
> +	if (pfn_valid(region->mreg_pfns[pfn_offset]))
> +		return mshv_region_process_pfns(region, flags,
> +						pfn_offset, pfn_count,
> +						handler);
> +	else
> +		return mshv_region_process_hole(region, flags,
> +						pfn_offset, pfn_count,
> +						handler);
> +}
> +
>  /**
>   * mshv_region_process_range - Processes a range of PFNs in a region.
>   * @region    : Pointer to the memory region structure.
> @@ -146,33 +197,47 @@ static int mshv_region_process_range(struct mshv_mem_region *region,
>  						    u64 pfn_count,
>  						    bool huge_page))
>  {
> -	u64 pfn_end;
> +	u64 start, end;
>  	long ret;
> 
> -	if (check_add_overflow(pfn_offset, pfn_count, &pfn_end))
> +	if (!pfn_count)
> +		return 0;
> +
> +	if (check_add_overflow(pfn_offset, pfn_count, &end))
>  		return -EOVERFLOW;
> 
> -	if (pfn_end > region->nr_pfns)
> +	if (end > region->nr_pfns)
>  		return -EINVAL;
> 
> -	while (pfn_count) {
> -		/* Skip non-present pages */
> -		if (!pfn_valid(region->mreg_pfns[pfn_offset])) {
> -			pfn_offset++;
> -			pfn_count--;
> +	start = pfn_offset;
> +	end = pfn_offset + 1;
> +
> +	while (end < pfn_offset + pfn_count) {
> +		/*
> +		 * Accumulate contiguous pfns with the same validity
> +		 * (valid or not).
> +		 */
> +		if (pfn_valid(region->mreg_pfns[start]) ==
> +		    pfn_valid(region->mreg_pfns[end])) {
> +			end++;
>  			continue;
>  		}
> 
> -		ret = mshv_region_process_pfns(region, flags,
> -					       pfn_offset, pfn_count,
> -					       handler);
> +		ret = mshv_region_process_chunk(region, flags,
> +						start, end - start,
> +						handler);
>  		if (ret < 0)
>  			return ret;
> 
> -		pfn_offset += ret;
> -		pfn_count -= ret;
> +		start += ret;
>  	}
> 
> +	ret = mshv_region_process_chunk(region, flags,
> +					start, end - start,
> +					handler);
> +	if (ret < 0)
> +		return ret;
> +
>  	return 0;
>  }
>
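If I am reading the new loop correctly, it carves the requested range
into maximal runs of PFNs with the same pfn_valid() result, hands each
run to mshv_region_process_chunk(), and the call after the loop flushes
the final run. For example (a hypothetical 6-PFN range, V = valid,
H = hole, offsets relative to pfn_offset):

	validity:  V V V H H V
	chunks:    [0, 3) -> mshv_region_process_pfns()
	           [3, 5) -> mshv_region_process_hole()
	           [5, 6) -> mshv_region_process_pfns()
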
> @@ -208,6 +273,9 @@ static int mshv_region_chunk_share(struct mshv_mem_region *region,
>  				   u64 pfn_offset, u64 pfn_count,
>  				   bool huge_page)
>  {
> +	if (!pfn_valid(region->mreg_pfns[pfn_offset]))
> +		return -EINVAL;
> +
>  	if (huge_page)
>  		flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
>
> @@ -233,6 +301,9 @@ static int mshv_region_chunk_unshare(struct mshv_mem_region *region,
>  				     u64 pfn_offset, u64 pfn_count,
>  				     bool huge_page)
>  {
> +	if (!pfn_valid(region->mreg_pfns[pfn_offset]))
> +		return -EINVAL;
> +
>  	if (huge_page)
>  		flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
>
> @@ -256,6 +327,14 @@ static int mshv_region_chunk_remap(struct mshv_mem_region *region,
>  				   u64 pfn_offset, u64 pfn_count,
>  				   bool huge_page)
>  {
> +	/*
> +	 * Remap missing pages with no access to let the
> +	 * hypervisor track dirty pages, enabling precopy live
> +	 * migration.
> +	 */
> +	if (!pfn_valid(region->mreg_pfns[pfn_offset]))
> +		flags = HV_MAP_GPA_NO_ACCESS;

Is it OK to wipe out any other flags that might be set? Certainly, any previous
flags in PERMISSIONS_MASK should be removed, but what about ADJUSTABLE
and NOT_CACHED?
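
If those should be preserved, something like the following might be
better (HV_MAP_GPA_PERMISSIONS_MASK is my guess at the name of the
permissions mask; substitute whatever the header actually defines):

	/*
	 * Assumes HV_MAP_GPA_PERMISSIONS_MASK covers only the permission
	 * bits: clear those, but keep ADJUSTABLE, NOT_CACHED, etc.
	 */
	if (!pfn_valid(region->mreg_pfns[pfn_offset]))
		flags = (flags & ~HV_MAP_GPA_PERMISSIONS_MASK) |
			HV_MAP_GPA_NO_ACCESS;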

> +
>  	if (huge_page)
>  		flags |= HV_MAP_GPA_LARGE_PAGE;
>
> @@ -357,6 +436,9 @@ static int mshv_region_chunk_unmap(struct mshv_mem_region *region,
>  				   u64 pfn_offset, u64 pfn_count,
>  				   bool huge_page)
>  {
> +	if (!pfn_valid(region->mreg_pfns[pfn_offset]))
> +		return 0;
> +
>  	if (huge_page)
>  		flags |= HV_UNMAP_GPA_LARGE_PAGE;
>