[RFC PATCH v2 05/21] mm/zsmalloc: convert {try,}lock_zspage() to use zsdesc
From: Hyeonggon Yoo
Date: Thu Jul 13 2023 - 00:21:36 EST
Introduce trylock_zsdesc(), unlock_zsdesc() and wait_on_zsdesc_locked() as
thin wrappers around the corresponding page lock helpers, and convert
trylock_zspage() and lock_zspage() to use zsdesc instead of struct page.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
---
 mm/zsmalloc.c | 55 ++++++++++++++++++++++++++++++++-------------------
 1 file changed, 35 insertions(+), 20 deletions(-)
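
For reviewers' convenience, here is how lock_zspage() reads with this patch
applied. This is only a readability aid derived from the hunks below; it
assumes the zsdesc accessors added earlier in this series (get_first_zsdesc(),
get_next_zsdesc(), zsdesc_get(), zsdesc_put()), and the body of the in-function
comment is elided since it is unchanged by this patch.

static void lock_zspage(struct zspage *zspage)
{
	struct zsdesc *curr_zsdesc, *zsdesc;

	/*
	 * Pages we haven't locked yet can be migrated off the list while we're
	 * ... (rest of the comment unchanged)
	 */
	while (1) {
		migrate_read_lock(zspage);
		zsdesc = get_first_zsdesc(zspage);
		if (trylock_zsdesc(zsdesc))
			break;
		zsdesc_get(zsdesc);
		migrate_read_unlock(zspage);
		wait_on_zsdesc_locked(zsdesc);
		zsdesc_put(zsdesc);
	}

	curr_zsdesc = zsdesc;
	while ((zsdesc = get_next_zsdesc(curr_zsdesc))) {
		if (trylock_zsdesc(zsdesc)) {
			curr_zsdesc = zsdesc;
		} else {
			zsdesc_get(zsdesc);
			migrate_read_unlock(zspage);
			wait_on_zsdesc_locked(zsdesc);
			zsdesc_put(zsdesc);
			migrate_read_lock(zspage);
		}
	}
}

trylock_zspage() is converted the same way; the locking logic is unchanged,
only the struct page plumbing is replaced by the zsdesc wrappers.
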
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index f44a2d8a36b5..2cce76a19a1e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -326,6 +326,21 @@ static inline void zsdesc_put(struct zsdesc *zsdesc)
 	folio_put(folio);
 }
 
+static inline int trylock_zsdesc(struct zsdesc *zsdesc)
+{
+	return trylock_page(zsdesc_page(zsdesc));
+}
+
+static inline void unlock_zsdesc(struct zsdesc *zsdesc)
+{
+	unlock_page(zsdesc_page(zsdesc));
+}
+
+static inline void wait_on_zsdesc_locked(struct zsdesc *zsdesc)
+{
+	wait_on_page_locked(zsdesc_page(zsdesc));
+}
+
 /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
 static void SetZsHugePage(struct zspage *zspage)
 {
@@ -911,11 +926,11 @@ static void reset_page(struct page *page)
 
 static int trylock_zspage(struct zspage *zspage)
 {
-	struct page *cursor, *fail;
+	struct zsdesc *cursor, *fail;
 
-	for (cursor = get_first_page(zspage); cursor != NULL; cursor =
-					get_next_page(cursor)) {
-		if (!trylock_page(cursor)) {
+	for (cursor = get_first_zsdesc(zspage); cursor != NULL; cursor =
+					get_next_zsdesc(cursor)) {
+		if (!trylock_zsdesc(cursor)) {
 			fail = cursor;
 			goto unlock;
 		}
@@ -923,9 +938,9 @@ static int trylock_zspage(struct zspage *zspage)
 
 	return 1;
 unlock:
-	for (cursor = get_first_page(zspage); cursor != fail; cursor =
-					get_next_page(cursor))
-		unlock_page(cursor);
+	for (cursor = get_first_zsdesc(zspage); cursor != fail; cursor =
+					get_next_zsdesc(cursor))
+		unlock_zsdesc(cursor);
 
 	return 0;
 }
@@ -1755,7 +1770,7 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage)
  */
 static void lock_zspage(struct zspage *zspage)
 {
-	struct page *curr_page, *page;
+	struct zsdesc *curr_zsdesc, *zsdesc;
 
 	/*
 	 * Pages we haven't locked yet can be migrated off the list while we're
@@ -1767,24 +1782,24 @@ static void lock_zspage(struct zspage *zspage)
 	 */
 	while (1) {
 		migrate_read_lock(zspage);
-		page = get_first_page(zspage);
-		if (trylock_page(page))
+		zsdesc = get_first_zsdesc(zspage);
+		if (trylock_zsdesc(zsdesc))
 			break;
-		get_page(page);
+		zsdesc_get(zsdesc);
 		migrate_read_unlock(zspage);
-		wait_on_page_locked(page);
-		put_page(page);
+		wait_on_zsdesc_locked(zsdesc);
+		zsdesc_put(zsdesc);
 	}
 
-	curr_page = page;
-	while ((page = get_next_page(curr_page))) {
-		if (trylock_page(page)) {
-			curr_page = page;
+	curr_zsdesc = zsdesc;
+	while ((zsdesc = get_next_zsdesc(curr_zsdesc))) {
+		if (trylock_zsdesc(zsdesc)) {
+			curr_zsdesc = zsdesc;
 		} else {
-			get_page(page);
+			zsdesc_get(zsdesc);
 			migrate_read_unlock(zspage);
-			wait_on_page_locked(page);
-			put_page(page);
+			wait_on_zsdesc_locked(zsdesc);
+			zsdesc_put(zsdesc);
 			migrate_read_lock(zspage);
 		}
 	}
--
2.41.0