Re: [PATCH v1 1/2] mm/memory: cleanly support zeropage in vm_insert_page*(), vm_map_pages*() and vmf_insert_mixed()

From: Vincent Donnefort
Date: Fri May 17 2024 - 11:07:47 EST


Hi David,

[...]

> -static int validate_page_before_insert(struct page *page)
> +static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma)
> +{
> + VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP);
> + /*
> + * Whoever wants to forbid the zeropage after some zeropages
> + * might already have been mapped has to scan the page tables and
> + * bail out on any zeropages. Zeropages in COW mappings can
> + * be unshared using FAULT_FLAG_UNSHARE faults.
> + */
> + if (mm_forbids_zeropage(vma->vm_mm))
> + return false;
> + /* zeropages in COW mappings are common and unproblematic. */
> + if (is_cow_mapping(vma->vm_flags))
> + return true;
> + /* Mappings that do not allow for writable PTEs are unproblematic. */
> + if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE)))
> + return false;

Shouldn't we return true here? Returning false contradicts the comment just
above, which says these mappings are unproblematic.
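
I.e. something like this (untested), so that the code matches the comment:

        /* Mappings that do not allow for writable PTEs are unproblematic. */
        if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE)))
                return true;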

> + /*
> + * Why not allow any VMA that has vm_ops->pfn_mkwrite? GUP could
> + * find the shared zeropage and longterm-pin it, which would
> + * be problematic as soon as the zeropage gets replaced by a different
> + * page due to vma->vm_ops->pfn_mkwrite, because what's mapped would
> + * now differ to what GUP looked up. FSDAX is incompatible to
> + * FOLL_LONGTERM and VM_IO is incompatible to GUP completely (see
> + * check_vma_flags).
> + */
> + return vma->vm_ops && vma->vm_ops->pfn_mkwrite &&
> + (vma_is_fsdax(vma) || vma->vm_flags & VM_IO);
> +}
> +

[...]

>
> -/*
> - * This is the old fallback for page remapping.
> - *
> - * For historical reasons, it only allows reserved pages. Only
> - * old drivers should use this, and they needed to mark their
> - * pages reserved for the old functions anyway.
> - */
> static int insert_page(struct vm_area_struct *vma, unsigned long addr,
> struct page *page, pgprot_t prot)
> {
> @@ -2023,7 +2065,7 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
> pte_t *pte;
> spinlock_t *ptl;
>
> - retval = validate_page_before_insert(page);
> + retval = validate_page_before_insert(vma, page);
> if (retval)
> goto out;
> retval = -ENOMEM;
> @@ -2043,7 +2085,7 @@ static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
>
> if (!page_count(page))
> return -EINVAL;

This page_count() check here prevents inserting the zero-page.
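
Maybe the zeropage just needs to be excluded from that refcount test,
e.g. (completely untested, and assuming is_zero_page() is the right
helper and that validate_page_before_insert() accepts the zeropage via
vm_mixed_zeropage_allowed()):

        /* Don't reject the shared zeropage based on its refcount. */
        if (!is_zero_page(page) && !page_count(page))
                return -EINVAL;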

> - err = validate_page_before_insert(page);
> + err = validate_page_before_insert(vma, page);
> if (err)
> return err;
> return insert_page_into_pte_locked(vma, pte, addr, page, prot);
> @@ -2149,7 +2191,8 @@ EXPORT_SYMBOL(vm_insert_pages);
> * @page: source kernel page
> *
> * This allows drivers to insert individual pages they've allocated
> - * into a user vma.
> + * into a user vma. The zeropage is supported in some VMAs,
> + * see vm_mixed_zeropage_allowed().
> *
> * The page has to be a nice clean _individual_ kernel allocation.
> * If you allocate a compound page, you need to have marked it as
> @@ -2195,6 +2238,8 @@ EXPORT_SYMBOL(vm_insert_page);
> * @offset: user's requested vm_pgoff
> *
> * This allows drivers to map range of kernel pages into a user vma.
> + * The zeropage is supported in some VMAs, see
> + * vm_mixed_zeropage_allowed().
> *
> * Return: 0 on success and error code otherwise.
> */
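
Just to check my understanding of how this is meant to be used: with
this in place, a fault handler should be able to hand out the shared
zeropage along the lines below (untested sketch, my_fault() is made up;
the VMA must be one for which vm_mixed_zeropage_allowed() returns true,
and VM_MIXEDMAP has to be set up front so vm_insert_page() may be
called from the fault path):

        static vm_fault_t my_fault(struct vm_fault *vmf)
        {
                /* No real data to expose yet: map the shared zeropage. */
                struct page *page = ZERO_PAGE(vmf->address);
                int err;

                err = vm_insert_page(vmf->vma, vmf->address, page);
                if (err)
                        return vmf_error(err);
                return VM_FAULT_NOPAGE;
        }
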
> @@ -2410,8 +2455,11 @@ vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
> }
> EXPORT_SYMBOL(vmf_insert_pfn);
>
> -static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)