[PATCHv4 4/4] zsmalloc: show per fullness group class stats
From: Sergey Senozhatsky
Date: Fri Mar 03 2023 - 22:49:13 EST
zs_stats_size_show() still reports fullness using the old (3/4
threshold) almost_full/almost_empty stats. Switch it to fine-grained
per inuse ratio (fullness group) reporting, which gives significantly
more data on class fragmentation.
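
For illustration only (not part of the patch): a minimal userspace
sketch of how a zspage's in-use object count could map onto the
ZS_INUSE_RATIO_* buckets that the new per-class columns report. The
helper name inuse_to_fullness() and the exact rounding below are
assumptions made for this example, not zsmalloc's internal API.

#include <stdio.h>

/* Mirrors the bucket layout of enum fullness_group in mm/zsmalloc.c. */
enum fullness_group {
	ZS_INUSE_RATIO_0,
	ZS_INUSE_RATIO_10,
	/* NOTE: 8 more fullness groups here */
	ZS_INUSE_RATIO_99 = 10,
	ZS_INUSE_RATIO_100,
	NR_FULLNESS_GROUPS,
};

/*
 * Assumed mapping for this sketch: 0% and 100% in-use are exact
 * buckets, everything in between lands in a 10% step (e.g. the "40%"
 * column would hold zspages that are 30..39% in use).
 */
static int inuse_to_fullness(int inuse, int objs_per_zspage)
{
	if (inuse == 0)
		return ZS_INUSE_RATIO_0;
	if (inuse == objs_per_zspage)
		return ZS_INUSE_RATIO_100;
	return 100 * inuse / objs_per_zspage / 10 + 1;
}

int main(void)
{
	/* e.g. 12 of 32 objects in use: 37%, bucket index 4 ("40%" column) */
	printf("fullness group: %d\n", inuse_to_fullness(12, 32));
	return 0;
}

Running the sketch prints bucket index 4, i.e. such a zspage would be
counted in the "40%" column of the new zs_stats_size_show() output.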
Signed-off-by: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx>
---
mm/zsmalloc.c | 53 ++++++++++++++++++++++-----------------------------
1 file changed, 23 insertions(+), 30 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index a61540afbb28..aea50e2aa350 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -172,9 +172,7 @@
enum fullness_group {
ZS_INUSE_RATIO_0,
ZS_INUSE_RATIO_10,
- /* NOTE: 5 more fullness groups here */
- ZS_INUSE_RATIO_70 = 7,
- /* NOTE: 2 more fullness groups here */
+ /* NOTE: 8 more fullness groups here */
ZS_INUSE_RATIO_99 = 10,
ZS_INUSE_RATIO_100,
NR_FULLNESS_GROUPS,
@@ -621,23 +619,22 @@ static unsigned long zs_can_compact(struct size_class *class);
static int zs_stats_size_show(struct seq_file *s, void *v)
{
- int i;
+ int i, fg;
struct zs_pool *pool = s->private;
struct size_class *class;
int objs_per_zspage;
- unsigned long class_almost_full, class_almost_empty;
unsigned long obj_allocated, obj_used, pages_used, freeable;
- unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
unsigned long total_freeable = 0;
+ unsigned long inuse_totals[NR_FULLNESS_GROUPS] = {0, };
- seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
- "class", "size", "almost_full", "almost_empty",
+ seq_printf(s, " %5s %5s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %13s %10s %10s %16s %8s\n",
+ "class", "size", "10%", "20%", "30%", "40%",
+ "50%", "60%", "70%", "80%", "90%", "99%", "100%",
"obj_allocated", "obj_used", "pages_used",
"pages_per_zspage", "freeable");
for (i = 0; i < ZS_SIZE_CLASSES; i++) {
- int fg;
class = pool->size_class[i];
@@ -645,16 +642,12 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
continue;
spin_lock(&pool->lock);
- class_almost_full = 0;
- class_almost_empty = 0;
- /*
- * Replicate old behaviour for almost_full and almost_empty
- * stats.
- */
- for (fg = ZS_INUSE_RATIO_70; fg <= ZS_INUSE_RATIO_99; fg++)
- class_almost_full += zs_stat_get(class, fg);
- for (fg = ZS_INUSE_RATIO_10; fg < ZS_INUSE_RATIO_70; fg++)
- class_almost_empty += zs_stat_get(class, fg);
+
+ seq_printf(s, " %5u %5u ", i, class->size);
+ for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++) {
+ inuse_totals[fg] += zs_stat_get(class, fg);
+ seq_printf(s, "%9lu ", zs_stat_get(class, fg));
+ }
obj_allocated = zs_stat_get(class, ZS_OBJS_ALLOCATED);
obj_used = zs_stat_get(class, ZS_OBJS_INUSE);
@@ -665,14 +658,10 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
pages_used = obj_allocated / objs_per_zspage *
class->pages_per_zspage;
- seq_printf(s, " %5u %5u %11lu %12lu %13lu"
- " %10lu %10lu %16d %8lu\n",
- i, class->size, class_almost_full, class_almost_empty,
- obj_allocated, obj_used, pages_used,
- class->pages_per_zspage, freeable);
+ seq_printf(s, "%13lu %10lu %10lu %16d %8lu\n",
+ obj_allocated, obj_used, pages_used,
+ class->pages_per_zspage, freeable);
- total_class_almost_full += class_almost_full;
- total_class_almost_empty += class_almost_empty;
total_objs += obj_allocated;
total_used_objs += obj_used;
total_pages += pages_used;
@@ -680,10 +669,14 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
}
seq_puts(s, "\n");
- seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
- "Total", "", total_class_almost_full,
- total_class_almost_empty, total_objs,
- total_used_objs, total_pages, "", total_freeable);
+ seq_printf(s, " %5s %5s ", "Total", "");
+
+ for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++)
+ seq_printf(s, "%9lu ", inuse_totals[fg]);
+
+ seq_printf(s, "%13lu %10lu %10lu %16s %8lu\n",
+ total_objs, total_used_objs, total_pages, "",
+ total_freeable);
return 0;
}
--
2.40.0.rc0.216.gc4246ad0f0-goog