[PATCH v2 09/18] zsmalloc: move struct zs_meta from mapping to freelist
From: Minchan Kim
Date: Mon Mar 21 2016 - 02:31:29 EST
To support migration by the VM, every page needs to carry an address_space
in page->mapping, so zsmalloc should no longer use that field for its own
metadata. This patch therefore moves struct zs_meta from page->mapping to
page->freelist.
Signed-off-by: Minchan Kim <minchan@xxxxxxxxxx>
---
mm/zsmalloc.c | 23 ++++++++++++-----------
1 file changed, 12 insertions(+), 11 deletions(-)
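Note for reviewers (not part of the patch): a minimal sketch of the idea.
struct zs_meta is a bitfield packed into a single machine word and is
simply overlaid on the first page's ->freelist pointer; the field widths
below are placeholders for illustration, the real ones are defined earlier
in this series.

	/* Illustrative layout only; the actual bit widths come from the
	 * earlier patches in this series that introduced struct zs_meta.
	 */
	struct zs_meta {
		unsigned long freeobj:11;	/* index of first free object */
		unsigned long class:8;		/* size class index */
		unsigned long fullness:2;	/* fullness group */
		unsigned long inuse:11;		/* # of allocated objects */
	};

	/* Accessors just reinterpret the word in place, e.g.: */
	static int get_zspage_inuse(struct page *first_page)
	{
		struct zs_meta *m = (struct zs_meta *)&first_page->freelist;

		return m->inuse;
	}

The BUILD_BUG_ON added at init time (see the last hunk) guarantees the
whole struct still fits in a single word, so overwriting ->freelist with
it is safe.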
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 0c8ccd87c084..958f27a9079d 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -29,7 +29,7 @@
* Look at size_class->huge.
* page->lru: links together first pages of various zspages.
* Basically forming list of zspages in a fullness group.
- * page->mapping: override by struct zs_meta
+ * page->freelist: overridden by struct zs_meta
*
* Usage of struct page flags:
* PG_private: identifies the first component page
@@ -418,7 +418,7 @@ static int get_zspage_inuse(struct page *first_page)
VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
- m = (struct zs_meta *)&first_page->mapping;
+ m = (struct zs_meta *)&first_page->freelist;
return m->inuse;
}
@@ -429,7 +429,7 @@ static void set_zspage_inuse(struct page *first_page, int val)
VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
- m = (struct zs_meta *)&first_page->mapping;
+ m = (struct zs_meta *)&first_page->freelist;
m->inuse = val;
}
@@ -439,7 +439,7 @@ static void mod_zspage_inuse(struct page *first_page, int val)
VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
- m = (struct zs_meta *)&first_page->mapping;
+ m = (struct zs_meta *)&first_page->freelist;
m->inuse += val;
}
@@ -449,7 +449,7 @@ static void set_freeobj(struct page *first_page, int idx)
VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
- m = (struct zs_meta *)&first_page->mapping;
+ m = (struct zs_meta *)&first_page->freelist;
m->freeobj = idx;
}
@@ -459,7 +459,7 @@ static unsigned long get_freeobj(struct page *first_page)
VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
- m = (struct zs_meta *)&first_page->mapping;
+ m = (struct zs_meta *)&first_page->freelist;
return m->freeobj;
}
@@ -471,7 +471,7 @@ static void get_zspage_mapping(struct page *first_page,
VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
- m = (struct zs_meta *)&first_page->mapping;
+ m = (struct zs_meta *)&first_page->freelist;
*fullness = m->fullness;
*class_idx = m->class;
}
@@ -484,7 +484,7 @@ static void set_zspage_mapping(struct page *first_page,
VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
- m = (struct zs_meta *)&first_page->mapping;
+ m = (struct zs_meta *)&first_page->freelist;
m->fullness = fullness;
m->class = class_idx;
}
@@ -946,7 +946,7 @@ static void reset_page(struct page *page)
clear_bit(PG_private, &page->flags);
clear_bit(PG_private_2, &page->flags);
set_page_private(page, 0);
- page->mapping = NULL;
+ page->freelist = NULL;
page_mapcount_reset(page);
}
@@ -1056,6 +1056,7 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
INIT_LIST_HEAD(&page->lru);
if (i == 0) { /* first page */
+ page->freelist = NULL;
SetPagePrivate(page);
set_page_private(page, 0);
first_page = page;
@@ -2068,9 +2069,9 @@ static int __init zs_init(void)
/*
* A zspage's a free object index, class index, fullness group,
- * inuse object count are encoded in its (first)page->mapping
+ * inuse object count are encoded in its (first)page->freelist
* so sizeof(struct zs_meta) should be less than
- * sizeof(page->mapping(i.e., unsigned long)).
+ * sizeof(page->freelist) (i.e., void *).
*/
BUILD_BUG_ON(sizeof(struct zs_meta) > sizeof(unsigned long));
--
1.9.1