[PATCH] mm: Add vmavec

From: Matthew Wilcox (Oracle)
Date: Wed Oct 27 2021 - 13:28:35 EST


The vmavec lets us allocate and free batches of VMAs instead of
one at a time. Should improve fork() and exit() performance.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
include/linux/vmavec.h | 38 ++++++++++++++++++++++++++++++++++++++
kernel/fork.c | 17 +++++++++++++++++
mm/mmap.c | 30 ++++++++++++++++++++++--------
3 files changed, 77 insertions(+), 8 deletions(-)
create mode 100644 include/linux/vmavec.h

diff --git a/include/linux/vmavec.h b/include/linux/vmavec.h
new file mode 100644
index 000000000000..8a324e2e1258
--- /dev/null
+++ b/include/linux/vmavec.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VMAVEC_H
+#define _LINUX_VMAVEC_H
+
+/*
+ * A vma vector is an array of vm_area_structs, with a counter.
+ */
+
+#include <linux/types.h>
+
+struct mm_struct;
+struct vm_area_struct;
+
+#define VMAVEC_SIZE	15
+
+struct vmavec {
+	unsigned char nr;
+	/* void * so the array can be passed straight to the slab bulk API */
+	void *vmas[VMAVEC_SIZE];
+};
+
+#define VMAVEC(name)	struct vmavec name = { }
+
+static inline bool vmavec_full(struct vmavec *vmavec)
+{
+	return vmavec->nr == VMAVEC_SIZE;
+}
+
+static inline bool vmavec_empty(struct vmavec *vmavec)
+{
+	return vmavec->nr == 0;
+}
+
+/* The caller must ensure the vmavec is not already full. */
+static inline
+void vmavec_push(struct vmavec *vmavec, struct vm_area_struct *vma)
+{
+	vmavec->vmas[vmavec->nr++] = vma;
+}
+
+/* The caller must ensure the vmavec is not empty. */
+static inline struct vm_area_struct *vmavec_pop(struct vmavec *vmavec)
+{
+	return vmavec->vmas[--vmavec->nr];
+}
+
+void vm_area_free_vec(struct vmavec *vmavec);
+void vm_area_alloc_vec(struct mm_struct *mm, struct vmavec *vmavec);
+
+#endif /* _LINUX_VMAVEC_H */
diff --git a/kernel/fork.c b/kernel/fork.c
index 38681ad44c76..ea7e8bd00be8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -97,6 +97,7 @@
#include <linux/scs.h>
#include <linux/io_uring.h>
#include <linux/bpf.h>
+#include <linux/vmavec.h>

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
@@ -375,6 +376,38 @@ void vm_area_free(struct vm_area_struct *vma)
 	kmem_cache_free(vm_area_cachep, vma);
 }
 
+/**
+ * vm_area_alloc_vec - Allocate a batch of VMAs.
+ * @mm: The mm that the new VMAs will belong to.
+ * @vmavec: The vmavec to fill.
+ *
+ * Bulk-allocate up to VMAVEC_SIZE VMAs from the slab and initialise
+ * each one for @mm.  On return, @vmavec->nr is the number of VMAs
+ * actually allocated, which may be zero if the allocation failed.
+ */
+void vm_area_alloc_vec(struct mm_struct *mm, struct vmavec *vmavec)
+{
+	int i;
+
+	vmavec->nr = kmem_cache_alloc_bulk(vm_area_cachep, GFP_KERNEL,
+			VMAVEC_SIZE, vmavec->vmas);
+	for (i = 0; i < vmavec->nr; i++)
+		vma_init(vmavec->vmas[i], mm);
+}
+
+/**
+ * vm_area_free_vec - Free a batch of VMAs.
+ * @vmavec: The vmavec to empty.
+ *
+ * Return all VMAs in @vmavec to the slab in one call and reset the
+ * vmavec so that it can be reused.
+ */
+void vm_area_free_vec(struct vmavec *vmavec)
+{
+	kmem_cache_free_bulk(vm_area_cachep, vmavec->nr, vmavec->vmas);
+	vmavec->nr = 0;
+}
+
 static void account_kernel_stack(struct task_struct *tsk, int account)
 {
 	void *stack = task_stack_page(tsk);
diff --git a/mm/mmap.c b/mm/mmap.c
index 88dcc5c25225..bff4e94eec8c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -47,6 +47,7 @@
#include <linux/pkeys.h>
#include <linux/oom.h>
#include <linux/sched/mm.h>
+#include <linux/vmavec.h>

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
@@ -172,19 +173,28 @@ void unlink_file_vma(struct vm_area_struct *vma)
 	}
 }
 
-/*
- * Close a vm structure and free it, returning the next.
- */
-static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+/*
+ * Release the resources held by a VMA (vm_ops->close, file reference,
+ * mempolicy) without freeing the vm_area_struct itself.
+ */
+static void __remove_vma(struct vm_area_struct *vma)
 {
-	struct vm_area_struct *next = vma->vm_next;
-
 	might_sleep();
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
 	if (vma->vm_file)
 		fput(vma->vm_file);
 	mpol_put(vma_policy(vma));
+}
+
+/*
+ * Close a vm structure and free it, returning the next.
+ */
+static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+{
+	struct vm_area_struct *next = vma->vm_next;
+
+	__remove_vma(vma);
 	vm_area_free(vma);
 	return next;
 }
@@ -3125,6 +3131,8 @@ void exit_mmap(struct mm_struct *mm)
 {
 	struct mmu_gather tlb;
 	struct vm_area_struct *vma;
+	/* On-stack batch of detached VMAs awaiting a bulk free */
+	VMAVEC(vmavec);
 	unsigned long nr_accounted = 0;
 
 	/* mm's last user has gone, and its about to be pulled down */
@@ -3179,9 +3186,21 @@ void exit_mmap(struct mm_struct *mm)
 	while (vma) {
 		if (vma->vm_flags & VM_ACCOUNT)
 			nr_accounted += vma_pages(vma);
-		vma = remove_vma(vma);
-		cond_resched();
+		__remove_vma(vma);
+		/*
+		 * Batch the VMAs up and free them in bulk.  vm_next must be
+		 * read before the batch is freed, since this VMA is in it.
+		 */
+		vmavec_push(&vmavec, vma);
+		vma = vma->vm_next;
+		if (vmavec_full(&vmavec)) {
+			vm_area_free_vec(&vmavec);
+			cond_resched();
+		}
 	}
+	/* Free any remaining partial batch. */
+	if (!vmavec_empty(&vmavec))
+		vm_area_free_vec(&vmavec);
 	vm_unacct_memory(nr_accounted);
 }
 
--
2.33.0