Re: [PATCH 3/7] mm/thp: fix vma_address() if virtual address below file offset

From: Matthew Wilcox
Date: Tue Jun 01 2021 - 17:34:30 EST


On Tue, Jun 01, 2021 at 02:09:31PM -0700, Hugh Dickins wrote:
> static inline unsigned long
> -__vma_address(struct page *page, struct vm_area_struct *vma)
> +vma_address(struct page *page, struct vm_area_struct *vma)
> {
> -	pgoff_t pgoff = page_to_pgoff(page);
> -	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
> +	pgoff_t pgoff;
> +	unsigned long address;
> +
> +	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
> +	pgoff = page_to_pgoff(page);
> +	if (pgoff >= vma->vm_pgoff) {
> +		address = vma->vm_start +
> +			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
> +		/* Check for address beyond vma (or wrapped through 0?) */
> +		if (address < vma->vm_start || address >= vma->vm_end)
> +			address = -EFAULT;
> +	} else if (PageHead(page) &&
> +		   pgoff + (1UL << compound_order(page)) > vma->vm_pgoff) {

	} else if (PageHead(page) &&
		   pgoff + compound_nr(page) > vma->vm_pgoff) {
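
For reference, compound_nr() behaves like the sketch below; this is not
the literal include/linux/mm.h definition, but for a head page it gives
the same 1UL << compound_order(page) bound as the quoted hunk, and for
an order-0 page it gives 1.

	/* Behavioural sketch of compound_nr(), not the mm.h definition */
	static inline unsigned long compound_nr(struct page *page)
	{
		/* compound_order() is 0 for a base page, so this gives 1 there */
		return 1UL << compound_order(page);
	}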

> +vma_address_end(struct page *page, struct vm_area_struct *vma)
> {
> +	pgoff_t pgoff;
> +	unsigned long address;
> +
> +	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
> +	pgoff = page_to_pgoff(page);
> +	if (PageHead(page))
> +		pgoff += 1UL << compound_order(page);
> +	else
> +		pgoff++;

Again, you can use compound_nr() here. In fact, the whole thing can be:

	pgoff += compound_nr(page);
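
With that, the PageHead()/else distinction goes away entirely, since
compound_nr() evaluates to 1 for an order-0 page. The quoted part of
vma_address_end() would then read something like the sketch below; the
unquoted remainder of the function is assumed unchanged:

	pgoff_t pgoff;
	unsigned long address;

	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	pgoff = page_to_pgoff(page);
	pgoff += compound_nr(page);	/* covers THP head and order-0 alike */
	...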