[RFC PATCH v8 01/14] mm: add MAP_HUGETLB support to vm_mmap

From: Khalid Aziz
Date: Wed Feb 13 2019 - 19:06:40 EST


From: Tycho Andersen <tycho@xxxxxxxxxx>

vm_mmap() is exported, which means kernel modules can use it. In particular,
for testing XPFO support we want to call it with the MAP_HUGETLB flag, so
let's support MAP_HUGETLB in vm_mmap() by factoring the hugetlb file setup
out of ksys_mmap_pgoff() into a shared helper, map_hugetlb_setup().
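
For illustration only (not part of this patch): a test module could then
request an anonymous hugetlb mapping roughly as sketched below. The function
name is hypothetical; only vm_mmap() and the standard mmap flags are taken
from the kernel.

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/err.h>
#include <linux/printk.h>

/*
 * Hypothetical module-side helper: map @len bytes of anonymous hugetlb
 * memory through vm_mmap(). With this patch, vm_mmap() sets up the
 * hugetlb file itself, so NULL is passed for the file argument.
 */
static unsigned long xpfo_test_map_hugetlb(unsigned long len)
{
	unsigned long addr;

	addr = vm_mmap(NULL, 0, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, 0);
	if (IS_ERR_VALUE(addr))
		pr_err("hugetlb vm_mmap() failed: %ld\n", (long)addr);

	return addr;
}

Note that with this change, passing a non-NULL file together with
MAP_HUGETLB to vm_mmap() returns -EINVAL.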

Signed-off-by: Tycho Andersen <tycho@xxxxxxxxxx>
Tested-by: Marco Benatto <marco.antonio.780@xxxxxxxxx>
Tested-by: Khalid Aziz <khalid.aziz@xxxxxxxxxx>
---
 include/linux/mm.h |  2 ++
 mm/mmap.c          | 19 +------------------
 mm/util.c          | 32 ++++++++++++++++++++++++++++++++
 3 files changed, 35 insertions(+), 18 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5411de93a363..30bddc7b3c75 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2361,6 +2361,8 @@ struct vm_unmapped_area_info {
 extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
 extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
 
+struct file *map_hugetlb_setup(unsigned long *len, unsigned long flags);
+
 /*
  * Search for an unmapped address range.
  *
diff --git a/mm/mmap.c b/mm/mmap.c
index 6c04292e16a7..c668d7d27c2b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1582,24 +1582,7 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
 		if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file)))
 			goto out_fput;
 	} else if (flags & MAP_HUGETLB) {
-		struct user_struct *user = NULL;
-		struct hstate *hs;
-
-		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
-		if (!hs)
-			return -EINVAL;
-
-		len = ALIGN(len, huge_page_size(hs));
-		/*
-		 * VM_NORESERVE is used because the reservations will be
-		 * taken when vm_ops->mmap() is called
-		 * A dummy user value is used because we are not locking
-		 * memory so no accounting is necessary
-		 */
-		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
-				VM_NORESERVE,
-				&user, HUGETLB_ANONHUGE_INODE,
-				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
+		file = map_hugetlb_setup(&len, flags);
 		if (IS_ERR(file))
 			return PTR_ERR(file);
 	}
diff --git a/mm/util.c b/mm/util.c
index 8bf08b5b5760..536c14cf88ba 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -357,6 +357,29 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 	return ret;
 }
 
+struct file *map_hugetlb_setup(unsigned long *len, unsigned long flags)
+{
+	struct user_struct *user = NULL;
+	struct hstate *hs;
+
+	hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
+	if (!hs)
+		return ERR_PTR(-EINVAL);
+
+	*len = ALIGN(*len, huge_page_size(hs));
+
+	/*
+	 * VM_NORESERVE is used because the reservations will be
+	 * taken when vm_ops->mmap() is called
+	 * A dummy user value is used because we are not locking
+	 * memory so no accounting is necessary
+	 */
+	return hugetlb_file_setup(HUGETLB_ANON_FILE, *len,
+			VM_NORESERVE,
+			&user, HUGETLB_ANONHUGE_INODE,
+			(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
+}
+
 unsigned long vm_mmap(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
 	unsigned long flag, unsigned long offset)
@@ -366,6 +389,15 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
 	if (unlikely(offset_in_page(offset)))
 		return -EINVAL;
 
+	if (flag & MAP_HUGETLB) {
+		if (file)
+			return -EINVAL;
+
+		file = map_hugetlb_setup(&len, flag);
+		if (IS_ERR(file))
+			return PTR_ERR(file);
+	}
+
 	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
 }
 EXPORT_SYMBOL(vm_mmap);
--
2.17.1