[PATCH] mm: cma: add trace events for CMA alloc perf testing
From: Georgi Djakov
Date: Wed Mar 24 2021 - 12:08:44 EST
From: Liam Mark <lmark@xxxxxxxxxxxxxx>
Add CMA and migrate trace events to enable CMA allocation performance
to be measured via ftrace. The new cma_alloc_start event marks the
start of an allocation, cma_alloc_busy_retry fires each time a busy
page range forces a retry, and mm_migrate_pages_start marks the start
of page migration. The existing cma_alloc event is converted into an
event class so that cma_alloc_busy_retry can share its format.
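
A minimal measurement sketch (assuming tracefs is mounted at the
usual /sys/kernel/tracing; the event directories follow from the cma
and migrate trace systems):

  # cd /sys/kernel/tracing
  # echo 1 > events/cma/enable
  # echo 1 > events/migrate/mm_migrate_pages_start/enable
  # cat trace_pipe

Allocation latency can then be read as the timestamp delta between a
task's cma_alloc_start event and its matching cma_alloc event, with
cma_alloc_busy_retry and mm_migrate_pages_start indicating how much of
that window is spent retrying busy ranges and migrating pages.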
Signed-off-by: Liam Mark <lmark@xxxxxxxxxxxxxx>
Signed-off-by: Georgi Djakov <georgi.djakov@xxxxxxxxxx>
---
 include/trace/events/cma.h     | 39 +++++++++++++++++++++++++++++++++-
 include/trace/events/migrate.h | 22 +++++++++++++++++++
 mm/cma.c                       |  4 ++++
 mm/migrate.c                   |  2 ++
 4 files changed, 66 insertions(+), 1 deletion(-)
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
index 5017a8829270..cdfd06afb39a 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -8,7 +8,7 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-TRACE_EVENT(cma_alloc,
+DECLARE_EVENT_CLASS(cma_alloc_class,
 
 	TP_PROTO(unsigned long pfn, const struct page *page,
 		 unsigned int count, unsigned int align),
@@ -61,6 +61,43 @@ TRACE_EVENT(cma_release,
 		  __entry->count)
 );
 
+TRACE_EVENT(cma_alloc_start,
+
+	TP_PROTO(unsigned int count, unsigned int align),
+
+	TP_ARGS(count, align),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, count)
+		__field(unsigned int, align)
+	),
+
+	TP_fast_assign(
+		__entry->count = count;
+		__entry->align = align;
+	),
+
+	TP_printk("count=%u align=%u",
+		  __entry->count,
+		  __entry->align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
 #endif /* _TRACE_CMA_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 363b54ce104c..9fb2a3bbcdfb 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -82,6 +82,28 @@ TRACE_EVENT(mm_migrate_pages,
 		__print_symbolic(__entry->mode, MIGRATE_MODE),
 		__print_symbolic(__entry->reason, MIGRATE_REASON))
 );
+
+TRACE_EVENT(mm_migrate_pages_start,
+
+	TP_PROTO(enum migrate_mode mode, int reason),
+
+	TP_ARGS(mode, reason),
+
+	TP_STRUCT__entry(
+		__field(enum migrate_mode, mode)
+		__field(int, reason)
+	),
+
+	TP_fast_assign(
+		__entry->mode = mode;
+		__entry->reason = reason;
+	),
+
+	TP_printk("mode=%s reason=%s",
+		  __print_symbolic(__entry->mode, MIGRATE_MODE),
+		  __print_symbolic(__entry->reason, MIGRATE_REASON))
+);
+
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
diff --git a/mm/cma.c b/mm/cma.c
index 90e27458ddb7..984c85fd16ec 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -443,6 +443,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	if (!count)
 		goto out;
 
+	trace_cma_alloc_start(count, align);
+
 	mask = cma_bitmap_aligned_mask(cma, align);
 	offset = cma_bitmap_aligned_offset(cma, align);
 	bitmap_maxno = cma_bitmap_maxno(cma);
@@ -483,6 +485,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
+
+		trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align);
 		/* try again with a bit different memory target */
 		start = bitmap_no + mask + 1;
 	}
diff --git a/mm/migrate.c b/mm/migrate.c
index 47df0df8f21a..58b1b03e0c98 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1445,6 +1445,8 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 	int rc, nr_subpages;
 	LIST_HEAD(ret_pages);
 
+	trace_mm_migrate_pages_start(mode, reason);
+
 	if (!swapwrite)
 		current->flags |= PF_SWAPWRITE;
 