[RFC 4/4] mm: show number of vmalloc pages in /proc/meminfo
From: Roman Gushchin
Date: Fri Dec 14 2018 - 13:08:01 EST
Vmalloc() is used more and more these days (kernel stacks, bpf and
the percpu allocator are new top users), and the total share of
memory consumed by vmalloc() can be pretty significant and changes
dynamically.
/proc/meminfo is the best place to display this information:
its primary goal is to show the top consumers of memory.
Since the VmallocUsed field in /proc/meminfo has not been in use
for quite a long time (it has been set to 0 by
commit a5ad88ce8c7f ("mm: get rid of 'vmalloc_info' from
/proc/meminfo")), let's reuse it for showing the actual
physical memory consumption of vmalloc().
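With this change, VmallocUsed reports the number of physically backed
vmalloc pages, e.g. (the values below are illustrative only):

  $ grep -E 'Vmalloc|Percpu' /proc/meminfo
  VmallocTotal:   34359738367 kB
  VmallocUsed:          38280 kB
  VmallocChunk:             0 kB
  Percpu:                4352 kB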
Signed-off-by: Roman Gushchin <guro@xxxxxx>
Acked-by: Johannes Weiner <hannes@xxxxxxxxxxx>
---
fs/proc/meminfo.c | 2 +-
include/linux/vmalloc.h | 3 +++
mm/vmalloc.c | 10 ++++++++++
3 files changed, 14 insertions(+), 1 deletion(-)
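Note for reviewers: a minimal sketch of how the new helper could be
consumed elsewhere in the kernel (hypothetical example, not part of
this patch):

  /* hypothetical helper: vmalloc() memory footprint in kilobytes */
  static unsigned long vmalloc_used_kb(void)
  {
          return vmalloc_nr_pages() << (PAGE_SHIFT - 10);
  }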
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 568d90e17c17..465ea0153b2a 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -120,7 +120,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
show_val_kb(m, "Committed_AS: ", committed);
seq_printf(m, "VmallocTotal: %8lu kB\n",
(unsigned long)VMALLOC_TOTAL >> 10);
- show_val_kb(m, "VmallocUsed: ", 0ul);
+ show_val_kb(m, "VmallocUsed: ", vmalloc_nr_pages());
show_val_kb(m, "VmallocChunk: ", 0ul);
show_val_kb(m, "Percpu: ", pcpu_nr_pages());
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 1205f7a03b48..ea3a0fb3a60f 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -64,10 +64,12 @@ extern void vm_unmap_aliases(void);
 
 #ifdef CONFIG_MMU
 extern void __init vmalloc_init(void);
+extern unsigned long vmalloc_nr_pages(void);
 #else
 static inline void vmalloc_init(void)
 {
 }
+static inline unsigned long vmalloc_nr_pages(void) { return 0; }
 #endif
 
 extern void *vmalloc(unsigned long size);
@@ -83,6 +85,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
 			pgprot_t prot, unsigned long vm_flags, int node,
 			const void *caller);
+
 #ifndef CONFIG_MMU
 extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
 static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index edd76953c23c..39db12fe8931 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -340,6 +340,13 @@ static unsigned long cached_align;
 
 static unsigned long vmap_area_pcpu_hole;
 
+atomic_long_t nr_vmalloc_pages;
+
+unsigned long vmalloc_nr_pages(void)
+{
+	return atomic_long_read(&nr_vmalloc_pages);
+}
+
 static struct vmap_area *__find_vmap_area(unsigned long addr)
 {
 	struct rb_node *n = vmap_area_root.rb_node;
@@ -1594,6 +1601,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
 			BUG_ON(!page);
 			__free_pages(page, 0);
 		}
+		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
 
 		if (area->flags & VM_EXT_PAGES)
 			kvfree(area->pages);
@@ -1739,12 +1747,14 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		if (unlikely(!page)) {
 			/* Successfully allocated i pages, free them in __vunmap() */
 			area->nr_pages = i;
+			atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
 			goto fail;
 		}
 		area->pages[i] = page;
 		if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
 			cond_resched();
 	}
+	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
 
 	if (map_vm_area(area, prot, area->pages))
 		goto fail;
--
2.19.2