[RFC PATCH v1 4/4] mm/gup: flag to limit follow_page() to transhuge pages
From: Mircea CIRJALIU - MELIU
Date: Wed Dec 11 2019 - 04:29:25 EST
Sometimes a caller needs to look up a transparent huge page mapped at a given
address. Instead of receiving a normal page and having to test it, save some
cycles by filtering out PTE-level mappings up front.
Signed-off-by: Mircea Cirjaliu <mcirjaliu@xxxxxxxxxxxxxxx>
---
include/linux/mm.h | 1 +
mm/gup.c | 13 +++++++++++--
2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c97ea3b..64bbf83 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2579,6 +2579,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_ANON 0x8000 /* don't do file mappings */
#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */
#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
+#define FOLL_HUGE 0x40000 /* only return huge mappings */
/*
* NOTE on FOLL_LONGTERM:
diff --git a/mm/gup.c b/mm/gup.c
index 7646bf9..a776bdc 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -361,9 +361,11 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
if (page)
return page;
}
- if (likely(!pmd_trans_huge(pmdval)))
+ if (likely(!pmd_trans_huge(pmdval))) {
+ if (flags & FOLL_HUGE)
+ return ERR_PTR(-EFAULT);
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
-
+ }
if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
return no_page_table(vma, flags);
@@ -382,6 +384,8 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
}
if (unlikely(!pmd_trans_huge(*pmd))) {
spin_unlock(ptl);
+ if (flags & FOLL_HUGE)
+ return ERR_PTR(-EFAULT);
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
}
if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
@@ -513,6 +517,8 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
struct page *page;
struct mm_struct *mm = vma->vm_mm;
+ VM_BUG_ON((flags & (FOLL_SPLIT | FOLL_HUGE)) == (FOLL_SPLIT | FOLL_HUGE));
+
ctx->page_mask = 0;
/* make this handle hugepd */
@@ -685,6 +691,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
return -EFAULT;
+ if (gup_flags & FOLL_HUGE && !transparent_hugepage_enabled(vma))
+ return -EFAULT;
+
if (write) {
if (!(vm_flags & VM_WRITE)) {
if (!(gup_flags & FOLL_FORCE))