[PATCH] mm/cma.c: add total and used count to cma_alloc() debug log to better understand the CMA area during debugging

From: Xiang Gao
Date: Wed Sep 18 2024 - 08:43:44 EST


From: gaoxiang17 <gaoxiang17@xxxxxxxxxx>

Print the total and used page counts of the CMA area in the cma_alloc()
debug message, so the state of the area is visible while debugging.
Example output:

[ 24.225125] cma: cma_alloc(cma (____ptrval____), name: reserved, total count 16384, used count 64, request count 1, align 0)
[ 24.231963] cma: cma_alloc(cma (____ptrval____), name: reserved, total count 16384, used count 65, request count 1, align 0)
[ 24.235712] cma: cma_alloc(cma (____ptrval____), name: reserved, total count 16384, used count 66, request count 1, align 0)
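
The used count is the number of allocated pages, i.e. the number of set
bits in the CMA bitmap shifted left by order_per_bit (each bit covers
2^order_per_bit pages). A minimal user-space sketch of that arithmetic,
with a made-up bitmap word and order_per_bit value purely for
illustration (this is not kernel code):

  #include <stdio.h>

  int main(void)
  {
          unsigned long bitmap = 0x3f;    /* 6 set bits: 6 allocated chunks */
          unsigned int order_per_bit = 2; /* each bit covers 2^2 = 4 pages  */
          unsigned long used_bits = 0;

          /* count the set bits, as bitmap_weight() does for one word */
          for (unsigned int i = 0; i < 8 * sizeof(bitmap); i++)
                  if (bitmap & (1UL << i))
                          used_bits++;

          /* used count = set bits << order_per_bit, as in cma_get_used() */
          printf("used count: %lu\n", used_bits << order_per_bit);
          return 0;
  }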

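Since the message is emitted with pr_debug(), it only shows up when debug
output for mm/cma.c is enabled (e.g. with DEBUG defined or via dynamic
debug). One way to enable it at runtime, assuming CONFIG_DYNAMIC_DEBUG is
set and debugfs is mounted at /sys/kernel/debug:

  echo 'file mm/cma.c +p' > /sys/kernel/debug/dynamic_debug/control
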
Signed-off-by: gaoxiang17 <gaoxiang17@xxxxxxxxxx>
---
mm/cma.c | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/mm/cma.c b/mm/cma.c
index 3e9724716bad..cceff3b6a79e 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -403,6 +403,17 @@ static void cma_debug_show_areas(struct cma *cma)
spin_unlock_irq(&cma->lock);
}

+static unsigned long cma_get_used(struct cma *cma)
+{
+ unsigned long used;
+
+ spin_lock_irq(&cma->lock);
+ used = bitmap_weight(cma->bitmap, cma_bitmap_maxno(cma));
+ spin_unlock_irq(&cma->lock);
+
+ return used << cma->order_per_bit;
+}
+
/**
* cma_alloc() - allocate pages from contiguous area
* @cma: Contiguous memory region for which the allocation is performed.
@@ -430,8 +441,8 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
if (!cma || !cma->count || !cma->bitmap)
return page;

- pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
- (void *)cma, cma->name, count, align);
+ pr_debug("%s(cma %p, name: %s, total count %lu, used count: %lu, request count %lu, align %d)\n", __func__,
+ (void *)cma, cma->name, cma->count, cma_get_used(cma), count, align);

if (!count)
return page;
--
2.34.1