[PATCH v4 12/22] mm/zsmalloc: convert __free_zspage() to use zpdesc

From: alexs
Date: Mon Jul 29 2024 - 07:23:27 EST


From: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>

Introduce zpdesc_is_locked() and convert __free_zspage() to use zpdesc.
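
For reference, the other zpdesc accessors used in the converted loop
(zpdesc_unlock(), zpdesc_dec_zone_page_state(), zpdesc_put()) were
introduced earlier in this series as thin wrappers around the
corresponding struct page operations. A rough sketch, with illustrative
bodies that may differ in detail from the actual definitions in
mm/zpdesc.h:

	/* unlock the underlying page of a zpdesc */
	static inline void zpdesc_unlock(struct zpdesc *zpdesc)
	{
		unlock_page(zpdesc_page(zpdesc));
	}

	/* decrement the NR_ZSPAGES zone counter for this zpdesc */
	static inline void zpdesc_dec_zone_page_state(struct zpdesc *zpdesc)
	{
		dec_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
	}

	/* drop a reference on the underlying page */
	static inline void zpdesc_put(struct zpdesc *zpdesc)
	{
		put_page(zpdesc_page(zpdesc));
	}

Keeping VM_BUG_ON_PAGE() operating on zpdesc_page(zpdesc) preserves the
existing page-based diagnostics while the loop itself handles zpdesc only.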

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
Signed-off-by: Alex Shi <alexs@xxxxxxxxxx>
---
 mm/zpdesc.h   |  4 ++++
 mm/zsmalloc.c | 20 ++++++++++----------
 2 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index ad04c8337cae..72c8c072b4c8 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -119,4 +119,8 @@ static inline struct zone *zpdesc_zone(struct zpdesc *zpdesc)
 	return page_zone(zpdesc_page(zpdesc));
 }
 
+static inline bool zpdesc_is_locked(struct zpdesc *zpdesc)
+{
+	return PageLocked(zpdesc_page(zpdesc));
+}
 #endif
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 6d1971836391..68fdea7b6e0d 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -901,23 +901,23 @@ static int trylock_zspage(struct zspage *zspage)
 static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 			  struct zspage *zspage)
 {
-	struct page *page, *next;
+	struct zpdesc *zpdesc, *next;
 
 	assert_spin_locked(&class->lock);
 
 	VM_BUG_ON(get_zspage_inuse(zspage));
 	VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);
 
-	next = page = get_first_page(zspage);
+	next = zpdesc = get_first_zpdesc(zspage);
 	do {
-		VM_BUG_ON_PAGE(!PageLocked(page), page);
-		next = get_next_page(page);
-		reset_zpdesc(page_zpdesc(page));
-		unlock_page(page);
-		dec_zone_page_state(page, NR_ZSPAGES);
-		put_page(page);
-		page = next;
-	} while (page != NULL);
+		VM_BUG_ON_PAGE(!zpdesc_is_locked(zpdesc), zpdesc_page(zpdesc));
+		next = get_next_zpdesc(zpdesc);
+		reset_zpdesc(zpdesc);
+		zpdesc_unlock(zpdesc);
+		zpdesc_dec_zone_page_state(zpdesc);
+		zpdesc_put(zpdesc);
+		zpdesc = next;
+	} while (zpdesc != NULL);
 
 	cache_free_zspage(pool, zspage);

--
2.43.0