[PATCH 12/20] mm/zsmalloc: convert __free_zspage() to use zpdesc

From: alexs
Date: Thu Jun 27 2024 - 23:09:32 EST


From: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>

Introduce zpdesc_is_locked(), a thin wrapper around PageLocked() on the
underlying page, and convert __free_zspage() to walk zpdesc rather than
struct page, so the free path no longer touches struct page directly.
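
A minimal usage sketch of the new helper (illustrative only;
zpdesc_unlock() is used in the conversion below, while zpdesc_lock() is
assumed to come from earlier patches in this series):

	/* Assert the zpdesc's backing page is locked before touching it. */
	static void zpdesc_locked_example(struct zpdesc *zpdesc)
	{
		zpdesc_lock(zpdesc);	/* assumed helper from earlier patches */
		VM_BUG_ON_PAGE(!zpdesc_is_locked(zpdesc), zpdesc_page(zpdesc));
		zpdesc_unlock(zpdesc);
	}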

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
Signed-off-by: Alex Shi <alexs@xxxxxxxxxx>
---
mm/zpdesc.h | 4 ++++
mm/zsmalloc.c | 20 ++++++++++----------
2 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index 2e35b8220e05..186ff9711ffb 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -109,4 +109,8 @@ static inline struct zone *zpdesc_zone(struct zpdesc *zpdesc)
return page_zone(zpdesc_page(zpdesc));
}

+static inline bool zpdesc_is_locked(struct zpdesc *zpdesc)
+{
+ return PageLocked(zpdesc_page(zpdesc));
+}
#endif
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 216f8e6950ef..bf5a1b63bb17 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -903,23 +903,23 @@ static int trylock_zspage(struct zspage *zspage)
static void __free_zspage(struct zs_pool *pool, struct size_class *class,
struct zspage *zspage)
{
- struct page *page, *next;
+ struct zpdesc *zpdesc, *next;

assert_spin_locked(&class->lock);

VM_BUG_ON(get_zspage_inuse(zspage));
VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);

- next = page = get_first_page(zspage);
+ next = zpdesc = get_first_zpdesc(zspage);
do {
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- next = get_next_page(page);
- reset_zpdesc(page_zpdesc(page));
- unlock_page(page);
- dec_zone_page_state(page, NR_ZSPAGES);
- put_page(page);
- page = next;
- } while (page != NULL);
+ VM_BUG_ON_PAGE(!zpdesc_is_locked(zpdesc), zpdesc_page(zpdesc));
+ next = get_next_zpdesc(zpdesc);
+ reset_zpdesc(zpdesc);
+ zpdesc_unlock(zpdesc);
+ zpdesc_dec_zone_page_state(zpdesc);
+ zpdesc_put(zpdesc);
+ zpdesc = next;
+ } while (zpdesc != NULL);

cache_free_zspage(pool, zspage);

--
2.43.0