[PATCH v2 3/6] mm/zsmalloc: use a proper page type
From: David Hildenbrand
Date: Wed May 29 2024 - 07:19:58 EST
Let's clean it up: use a proper page type and store our data (the
offset into the page) in the lower 16 bits, as documented.
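
For illustration, a minimal userspace sketch of the encoding, using
the constant values from this patch (PAGE_TYPE_BASE, PG_zsmalloc,
FIRST_OBJ_PAGE_TYPE_MASK); the steps in main() are simplified
stand-ins for the kernel helpers, not the real implementation:

  #include <assert.h>

  /* Illustrative stand-ins for the kernel constants in this patch. */
  #define PAGE_TYPE_BASE			0xf0000000u
  #define PG_zsmalloc			0x01000000u
  #define FIRST_OBJ_PAGE_TYPE_MASK	0x0000ffffu

  /* A page has a type if its bit is clear while the base bits are set. */
  static int page_is_zsmalloc(unsigned int page_type)
  {
  	return (page_type & (PAGE_TYPE_BASE | PG_zsmalloc)) == PAGE_TYPE_BASE;
  }

  int main(void)
  {
  	unsigned int page_type = ~0u;	/* untyped: shared with _mapcount == -1 */

  	page_type &= ~PG_zsmalloc;	/* like __SetPageZsmalloc() */
  	assert(page_is_zsmalloc(page_type));

  	/* like set_first_obj_offset(): data lives in the lower 16 bits */
  	page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK;
  	page_type |= 0x1234;
  	assert((page_type & FIRST_OBJ_PAGE_TYPE_MASK) == 0x1234);

  	/* like reset_first_obj_offset(): data bits back to all-ones */
  	page_type |= FIRST_OBJ_PAGE_TYPE_MASK;

  	page_type |= PG_zsmalloc;	/* like __ClearPageZsmalloc() */
  	assert(page_type == ~0u);	/* back to the untyped value */
  	return 0;
  }

Because a type is expressed by clearing its bit while page_type starts
out as ~0, reset_first_obj_offset() below restores the lower 16 bits
to all-ones, so that clearing the type yields the untyped value again.
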
We won't be able to support 256 KiB base pages, which is acceptable.
Teach Kconfig to handle that cleanly using a new CONFIG_HAVE_ZSMALLOC.
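
As a quick sanity check of that limit, mirroring the BUILD_BUG_ON()
added below (PAGE_SIZE here is an illustrative stand-in for the
largest size we can still support):

  #include <assert.h>

  #define SZ_64K				0x00010000u
  #define PAGE_SIZE			SZ_64K	/* illustrative maximum */
  #define FIRST_OBJ_PAGE_TYPE_MASK	0x0000ffffu

  int main(void)
  {
  	/* The largest in-page offset, PAGE_SIZE - 1, must fit in 16 bits. */
  	assert(PAGE_SIZE - 1 <= FIRST_OBJ_PAGE_TYPE_MASK);
  	/* With 256 KiB pages, offsets up to 0x3ffff would not fit. */
  	return 0;
  }
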
Based on this, we should do a proper "struct zsdesc" conversion, as
proposed in [1].
This removes the last _mapcount/page_type offender.
[1] https://lore.kernel.org/all/20231130101242.2590384-1-42.hyeyoo@xxxxxxxxx/
Cc: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
drivers/block/zram/Kconfig | 1 +
include/linux/page-flags.h | 3 +++
mm/Kconfig | 10 ++++++++--
mm/zsmalloc.c | 29 +++++++++++++++++++++++++----
4 files changed, 37 insertions(+), 6 deletions(-)
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 6aea609b795c2..40e035468de22 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -2,6 +2,7 @@
config ZRAM
tristate "Compressed RAM block device support"
depends on BLOCK && SYSFS && MMU
+ depends on HAVE_ZSMALLOC
select ZSMALLOC
help
Creates virtual block devices called /dev/zramX (X = 0, 1, ...).
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f060db808102c..3afcbfbb379ea 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -957,6 +957,7 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#define PG_guard 0x08000000
#define PG_hugetlb 0x04000000
#define PG_slab 0x02000000
+#define PG_zsmalloc 0x01000000
#define PAGE_MAPCOUNT_RESERVE (~0x0000ffff)
#define PageType(page, flag) \
@@ -1072,6 +1073,8 @@ FOLIO_TYPE_OPS(hugetlb, hugetlb)
FOLIO_TEST_FLAG_FALSE(hugetlb)
#endif
+PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
+
/**
* PageHuge - Determine if the page belongs to hugetlbfs
* @page: The page to test.
diff --git a/mm/Kconfig b/mm/Kconfig
index b4cb45255a541..67dc18c94448d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -128,7 +128,7 @@ config ZSWAP_COMPRESSOR_DEFAULT
choice
prompt "Default allocator"
depends on ZSWAP
- default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if MMU
+ default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if HAVE_ZSMALLOC
default ZSWAP_ZPOOL_DEFAULT_ZBUD
help
Selects the default allocator for the compressed cache for
@@ -154,6 +154,7 @@ config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
bool "zsmalloc"
+ depends on HAVE_ZSMALLOC
select ZSMALLOC
help
Use the zsmalloc allocator as the default allocator.
@@ -186,10 +187,15 @@ config Z3FOLD
page. It is a ZBUD derivative so the simplicity and determinism are
still there.
+config HAVE_ZSMALLOC
+ def_bool y
+ depends on MMU
+ depends on PAGE_SIZE_LESS_THAN_256KB # we want <= 64 KiB
+
config ZSMALLOC
tristate
prompt "N:1 compression allocator (zsmalloc)" if ZSWAP
- depends on MMU
+ depends on HAVE_ZSMALLOC
help
zsmalloc is a slab-based memory allocator designed to store
pages of various compression levels efficiently. It achieves
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index a2a5866473bb8..44e0171d60036 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -20,7 +20,8 @@
* page->index: links together all component pages of a zspage
* For the huge page, this is always 0, so we use this field
* to store handle.
- * page->page_type: first object offset in a subpage of zspage
+ * page->page_type: PG_zsmalloc, the lower 16 bits store the first
+ * object offset in a subpage of a zspage
*
* Usage of struct page flags:
* PG_private: identifies the first component page
@@ -450,14 +451,28 @@ static inline struct page *get_first_page(struct zspage *zspage)
return first_page;
}
+#define FIRST_OBJ_PAGE_TYPE_MASK 0xffff
+
+static inline void reset_first_obj_offset(struct page *page)
+{
+ VM_WARN_ON_ONCE(!PageZsmalloc(page));
+ page->page_type |= FIRST_OBJ_PAGE_TYPE_MASK;
+}
+
static inline unsigned int get_first_obj_offset(struct page *page)
{
- return page->page_type;
+ VM_WARN_ON_ONCE(!PageZsmalloc(page));
+ return page->page_type & FIRST_OBJ_PAGE_TYPE_MASK;
}
static inline void set_first_obj_offset(struct page *page, unsigned int offset)
{
- page->page_type = offset;
+ /* With 16 bits available, we can support offsets into 64 KiB pages. */
+ BUILD_BUG_ON(PAGE_SIZE > SZ_64K);
+ VM_WARN_ON_ONCE(!PageZsmalloc(page));
+ VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK);
+ page->page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK;
+ page->page_type |= offset & FIRST_OBJ_PAGE_TYPE_MASK;
}
static inline unsigned int get_freeobj(struct zspage *zspage)
@@ -791,8 +806,9 @@ static void reset_page(struct page *page)
__ClearPageMovable(page);
ClearPagePrivate(page);
set_page_private(page, 0);
- page_mapcount_reset(page);
page->index = 0;
+ reset_first_obj_offset(page);
+ __ClearPageZsmalloc(page);
}
static int trylock_zspage(struct zspage *zspage)
@@ -965,11 +981,13 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
if (!page) {
while (--i >= 0) {
dec_zone_page_state(pages[i], NR_ZSPAGES);
+ __ClearPageZsmalloc(pages[i]);
__free_page(pages[i]);
}
cache_free_zspage(pool, zspage);
return NULL;
}
+ __SetPageZsmalloc(page);
inc_zone_page_state(page, NR_ZSPAGES);
pages[i] = page;
@@ -1754,6 +1772,9 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
VM_BUG_ON_PAGE(!PageIsolated(page), page);
+ /* We're committed, tell the world that this is a Zsmalloc page. */
+ __SetPageZsmalloc(newpage);
+
/* The page is locked, so this pointer must remain valid */
zspage = get_zspage(page);
pool = zspage->pool;
--
2.45.1