[PATCH v3 3/5] tracing: Use vmap_page_range() to map memmap ring buffer
From: Steven Rostedt
Date: Tue Apr 01 2025 - 16:32:50 EST
From: Steven Rostedt <rostedt@xxxxxxxxxxx>
The code that maps the physical memory retrieved by memmap currently
allocates an array of struct page pointers covering the physical memory
and then calls vmap() to map it to a virtual address. Instead of building
this temporary array of page pointers, simply use vmap_page_range(), which
can map the contiguous physical memory directly to a virtual address.
Link: https://lore.kernel.org/all/CAHk-=whUOfVucfJRt7E0AH+GV41ELmS4wJqxHDnui6Giddfkzw@xxxxxxxxxxxxxx/
Suggested-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Steven Rostedt (Google) <rostedt@xxxxxxxxxxx>
---
kernel/trace/trace.c | 35 +++++++++++++++++------------------
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e33f3b092e2e..1d7d2b772a74 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -50,6 +50,7 @@
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/sort.h>
+#include <linux/io.h> /* vmap_page_range() */
#include <asm/setup.h> /* COMMAND_LINE_SIZE */
@@ -9803,29 +9804,27 @@ static int instance_mkdir(const char *name)
return ret;
}
-static u64 map_pages(u64 start, u64 size)
+static u64 map_pages(unsigned long start, unsigned long size)
{
- struct page **pages;
- phys_addr_t page_start;
- unsigned int page_count;
- unsigned int i;
- void *vaddr;
+ unsigned long vmap_start, vmap_end;
+ struct vm_struct *area;
+ int ret;
- page_count = DIV_ROUND_UP(size, PAGE_SIZE);
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return 0;
- page_start = start;
- pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
- if (!pages)
- return 0;
+ vmap_start = (unsigned long) area->addr;
+ vmap_end = vmap_start + size;
- for (i = 0; i < page_count; i++) {
- phys_addr_t addr = page_start + i * PAGE_SIZE;
- pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
- }
- vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
- kfree(pages);
+ ret = vmap_page_range(vmap_start, vmap_end,
+ start, pgprot_nx(PAGE_KERNEL));
+ if (ret < 0) {
+ free_vm_area(area);
+ return 0;
+ }
- return (u64)(unsigned long)vaddr;
+ return (u64)vmap_start;
}
/**
--
2.47.2