[PATCHv2 07/13] mm, thp, tmpfs: initial support for huge page in write_begin/write_end in tmpfs

From: Ning Qu
Date: Mon Oct 21 2013 - 17:47:34 EST


For now we try to grab a huge cache page in write_begin when the minimum
requirements are satisfied: a huge page is only requested once the write
offset is past the first huge page range of the file, so most small files
avoid the huge page overhead. On a short copy, write_end zeroes the
uncopied parts of a huge page with zero_huge_user() instead of
zero_user_segments().
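
As a quick userspace illustration of the policy (a sketch only, not part
of the patch; the tmpfs path is hypothetical, and HPAGE_PMD_SIZE is
assumed to be 2MB, i.e. x86-64 with 4KB base pages):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	#define HPAGE_PMD_SIZE	(2UL << 20)	/* assumed: 2MB huge pages */

	int main(void)
	{
		char buf[4096];
		/* assumed mount point: tmpfs on /mnt/tmpfs */
		int fd = open("/mnt/tmpfs/testfile", O_RDWR | O_CREAT, 0644);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		memset(buf, 'x', sizeof(buf));
		/* pos < HPAGE_PMD_SIZE: write_begin sticks to small pages */
		pwrite(fd, buf, sizeof(buf), 0);
		/* pos >= HPAGE_PMD_SIZE: write_begin passes AOP_FLAG_TRANSHUGE
		 * down to shmem_getpage() and may get a huge cache page */
		pwrite(fd, buf, sizeof(buf), HPAGE_PMD_SIZE);
		close(fd);
		return 0;
	}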

Signed-off-by: Ning Qu <quning@xxxxxxxxx>
---
mm/shmem.c | 30 +++++++++++++++++++++++++-----
1 file changed, 25 insertions(+), 5 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 0dd6689..af56731 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1635,8 +1635,20 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
 	struct inode *inode = mapping->host;
 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
+	int ret = 0;
+	int getpage_flags = 0;
+
+	/*
+	 * Do not allocate a huge page for a write within the first huge
+	 * page range of the file, so most small files avoid the overhead.
+	 */
+	if (pos >= HPAGE_PMD_SIZE)
+		getpage_flags |= AOP_FLAG_TRANSHUGE;
 
-	return shmem_getpage(inode, index, pagep, SGP_WRITE, gfp, 0, NULL);
+	ret = shmem_getpage(inode, index, pagep, SGP_WRITE, gfp,
+				getpage_flags, NULL);
+
+	return ret;
 }
 
 static int
@@ -1650,10 +1662,18 @@ shmem_write_end(struct file *file, struct address_space *mapping,
 	i_size_write(inode, pos + copied);
 
 	if (!PageUptodate(page)) {
-		if (copied < PAGE_CACHE_SIZE) {
-			unsigned from = pos & (PAGE_CACHE_SIZE - 1);
-			zero_user_segments(page, 0, from,
-					from + copied, PAGE_CACHE_SIZE);
+		if (copied < len) {
+			unsigned from;
+			if (PageTransHugeCache(page)) {
+				from = pos & ~HPAGE_PMD_MASK;
+				zero_huge_user(page, 0, from);
+				zero_huge_user(page, from + copied,
+						HPAGE_PMD_SIZE);
+			} else {
+				from = pos & ~PAGE_CACHE_MASK;
+				zero_user_segments(page, 0, from,
+						from + copied, PAGE_CACHE_SIZE);
+			}
 		}
 		SetPageUptodate(page);
 	}
--
1.8.4
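
For reference, the zeroing arithmetic in the write_end hunk can be checked
in userspace (a standalone sketch with illustrative numbers; the macros
mirror the kernel definitions for 4KB pages and 2MB huge pages):

	#include <stdio.h>

	#define PAGE_CACHE_SIZE	4096UL
	#define PAGE_CACHE_MASK	(~(PAGE_CACHE_SIZE - 1))
	#define HPAGE_PMD_SIZE	(2UL << 20)
	#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

	int main(void)
	{
		unsigned long pos = 3 * (1UL << 20) + 100;	/* write at 3MB + 100 */
		unsigned long copied = 50;			/* short copy */

		/* Offset of the write within its huge page vs. small page. */
		unsigned long from_huge = pos & ~HPAGE_PMD_MASK;	/* 1MB + 100 */
		unsigned long from_small = pos & ~PAGE_CACHE_MASK;	/* 100 */

		/* The bytes outside [from, from + copied) must be zeroed
		 * before the page can be marked uptodate. */
		printf("huge page:  zero [0, %lu) and [%lu, %lu)\n",
		       from_huge, from_huge + copied, HPAGE_PMD_SIZE);
		printf("small page: zero [0, %lu) and [%lu, %lu)\n",
		       from_small, from_small + copied, PAGE_CACHE_SIZE);
		return 0;
	}

On a huge page the two zeroed ranges cover everything in the 2MB page
except the freshly copied bytes, mirroring what the small page case has
always done at 4KB granularity.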
