[PATCHv2 03/13] mm, thp, tmpfs: support to add huge page into pagecache for tmpfs
From: Ning Qu
Date: Mon Oct 21 2013 - 17:46:45 EST
For replacing a page inside the page cache, we assume the huge page
has been split before getting here.
For adding a new page to page cache, huge page support has been added.
Also refactor the shmem_add_to_page_cache function.
Signed-off-by: Ning Qu <quning@xxxxxxxxx>
---
mm/shmem.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 64 insertions(+), 9 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index a857ba8..45fcca2 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -277,27 +277,23 @@ static bool shmem_confirm_swap(struct address_space *mapping,
}
/*
- * Like add_to_page_cache_locked, but error if expected item has gone.
+ * Replace the swap entry with a page cache entry
*/
-static int shmem_add_to_page_cache(struct page *page,
+static int shmem_replace_page_page_cache(struct page *page,
struct address_space *mapping,
pgoff_t index, gfp_t gfp, void *expected)
{
int error;
- VM_BUG_ON(!PageLocked(page));
- VM_BUG_ON(!PageSwapBacked(page));
+ BUG_ON(PageTransHugeCache(page));
page_cache_get(page);
page->mapping = mapping;
page->index = index;
spin_lock_irq(&mapping->tree_lock);
- if (!expected)
- error = radix_tree_insert(&mapping->page_tree, index, page);
- else
- error = shmem_radix_tree_replace(mapping, index, expected,
- page);
+
+ error = shmem_radix_tree_replace(mapping, index, expected, page);
if (!error) {
mapping->nrpages++;
__inc_zone_page_state(page, NR_FILE_PAGES);
@@ -312,6 +308,63 @@ static int shmem_add_to_page_cache(struct page *page,
}
/*
+ * Insert a new page into the page cache
+ */
+static int shmem_insert_page_page_cache(struct page *page,
+ struct address_space *mapping,
+ pgoff_t index, gfp_t gfp)
+{
+ int error;
+ int nr;
+
+ if (PageTransHugeCache(page))
+ BUILD_BUG_ON(HPAGE_CACHE_NR > RADIX_TREE_PRELOAD_NR);
+
+ nr = hpagecache_nr_pages(page);
+
+ error = radix_tree_maybe_preload_contig(nr, gfp & ~__GFP_HIGHMEM);
+ if (error)
+ return error;
+
+ spin_lock_irq(&mapping->tree_lock);
+
+ error = __add_to_page_cache_locked(page, mapping, index);
+
+ if (!error)
+ __mod_zone_page_state(page_zone(page), NR_SHMEM, nr);
+
+ radix_tree_preload_end();
+ spin_unlock_irq(&mapping->tree_lock);
+
+ if (error)
+ page_cache_release(page);
+
+ return error;
+}
+
+/*
+ * Like add_to_page_cache_locked, but error if expected item has gone.
+ */
+static int shmem_add_to_page_cache(struct page *page,
+ struct address_space *mapping,
+ pgoff_t index, gfp_t gfp, void *expected)
+{
+ int error;
+
+ VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON(!PageSwapBacked(page));
+
+ if (expected) {
+ BUG_ON(PageTransHugeCache(page));
+ error = shmem_replace_page_page_cache(page, mapping, index, gfp,
+ expected);
+ } else
+ error = shmem_insert_page_page_cache(page, mapping, index, gfp);
+
+ return error;
+}
+
+/*
* Like delete_from_page_cache, but substitutes swap for page.
*/
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
@@ -319,6 +372,8 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
struct address_space *mapping = page->mapping;
int error;
+ BUG_ON(PageTransHugeCache(page));
+
spin_lock_irq(&mapping->tree_lock);
error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
page->mapping = NULL;
--
1.8.4
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/