[PATCH v3 08/20] mm/zsmalloc: convert init_zspage() to use zpdesc
From: alexs
Date: Mon Jul 08 2024 - 02:30:29 EST
From: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
Replace the get_first_page()/get_next_page() calls and kmap_atomic() in
init_zspage() with the new zpdesc helpers. No functional change intended.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
Signed-off-by: Alex Shi (Tencent) <alexs@xxxxxxxxxx>
---
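A note for reviewers who have not followed the earlier zpdesc patches: the
helpers this diff switches to are assumed to be thin wrappers that resolve
back to the underlying struct page, which is why the conversion is purely
mechanical. A minimal sketch of what zpdesc_kmap_atomic() might look like
under that assumption (illustrative only; the real definitions come from the
zpdesc patches earlier in this series, and page_zpdesc() below is a
hypothetical page-to-zpdesc conversion mirroring the zpdesc_page() used in
the diff):

static inline void *zpdesc_kmap_atomic(struct zpdesc *zpdesc)
{
	/* map the zpdesc's backing page, exactly as kmap_atomic(page) did */
	return kmap_atomic(zpdesc_page(zpdesc));
}

static inline struct zpdesc *get_first_zpdesc(struct zspage *zspage)
{
	/* hypothetical: wrap the first page of the zspage as a zpdesc */
	return page_zpdesc(get_first_page(zspage));
}

Since each accessor collapses to the same struct page operation, the freelist
layout written by init_zspage() is unchanged by this patch.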
 mm/zsmalloc.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 29b9fa5baa46..d3558f3f8bc3 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -948,16 +948,16 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
 {
 	unsigned int freeobj = 1;
 	unsigned long off = 0;
-	struct page *page = get_first_page(zspage);
+	struct zpdesc *zpdesc = get_first_zpdesc(zspage);
 
-	while (page) {
-		struct page *next_page;
+	while (zpdesc) {
+		struct zpdesc *next_zpdesc;
 		struct link_free *link;
 		void *vaddr;
 
-		set_first_obj_offset(page, off);
+		set_first_obj_offset(zpdesc_page(zpdesc), off);
 
-		vaddr = kmap_atomic(page);
+		vaddr = zpdesc_kmap_atomic(zpdesc);
 		link = (struct link_free *)vaddr + off / sizeof(*link);
 
 		while ((off += class->size) < PAGE_SIZE) {
@@ -970,8 +970,8 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
 		 * page, which must point to the first object on the next
 		 * page (if present)
 		 */
-		next_page = get_next_page(page);
-		if (next_page) {
+		next_zpdesc = get_next_zpdesc(zpdesc);
+		if (next_zpdesc) {
 			link->next = freeobj++ << OBJ_TAG_BITS;
 		} else {
 			/*
@@ -981,7 +981,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
 			link->next = -1UL << OBJ_TAG_BITS;
 		}
 		kunmap_atomic(vaddr);
-		page = next_page;
+		zpdesc = next_zpdesc;
 		off %= PAGE_SIZE;
 	}
 
--
2.43.0