[PATCH v12 23/69] mm: use maple tree operations for find_vma_intersection()

From: Liam Howlett
Date: Tue Jul 19 2022 - 22:19:48 EST


From: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx>

Move find_vma_intersection() to mmap.c and change the implementation to use
the maple tree.

When searching for a vma within a range, it is easier to use the maple
tree interface.
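
For illustration only (not part of this patch): the range search the new
implementation relies on is a single mt_find() call.  mt_find() returns the
first entry whose stored range intersects [index, max], and its maximum is
inclusive, which is why the exclusive end_addr below becomes end_addr - 1.
A minimal sketch, assuming the caller already holds mmap_lock and using a
made-up helper name:

	#include <linux/mm.h>

	/* Return the first VMA overlapping [start, end), or NULL. */
	static struct vm_area_struct *vma_in_range(struct mm_struct *mm,
						   unsigned long start,
						   unsigned long end)
	{
		unsigned long index = start;

		/* mt_find()'s maximum index is inclusive, hence end - 1. */
		return mt_find(&mm->mm_mt, &index, end - 1);
	}

No follow-up vm_start check is needed, unlike the old find_vma()-based
inline helper.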

Export find_vma_intersection() for the kvm module.
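
With the symbol exported, a module can do the lookup itself under the mmap
lock.  A hypothetical caller might look roughly like the sketch below (the
helper name is made up and not taken from kvm or from this patch):

	#include <linux/mm.h>

	/* Does any VMA overlap the user range [start, end)?  Needs start < end. */
	static bool range_is_mapped(struct mm_struct *mm, unsigned long start,
				    unsigned long end)
	{
		struct vm_area_struct *vma;

		mmap_read_lock(mm);
		vma = find_vma_intersection(mm, start, end);
		mmap_read_unlock(mm);

		return vma != NULL;
	}

The pointer is only tested for NULL after the lock is dropped; anything that
dereferences the VMA must of course do so while still holding mmap_lock.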

Link: https://lkml.kernel.org/r/20220504011345.662299-8-Liam.Howlett@xxxxxxxxxx
Link: https://lkml.kernel.org/r/20220621204632.3370049-24-Liam.Howlett@xxxxxxxxxx
Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: David Howells <dhowells@xxxxxxxxxx>
Cc: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
Cc: SeongJae Park <sj@xxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Davidlohr Bueso <dave@xxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
 include/linux/mm.h | 22 ++++------------------
 mm/mmap.c          | 29 +++++++++++++++++++++++++++++
 mm/nommu.c         | 11 +++++++++++
 3 files changed, 44 insertions(+), 18 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8207fbc6ed87..55b4b53895e8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2792,26 +2792,12 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);
 
-/**
- * find_vma_intersection() - Look up the first VMA which intersects the interval
- * @mm: The process address space.
- * @start_addr: The inclusive start user address.
- * @end_addr: The exclusive end user address.
- *
- * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
- * start_addr < end_addr.
+/*
+ * Look up the first VMA which intersects the interval [start_addr, end_addr)
+ * NULL if none. Assume start_addr < end_addr.
  */
-static inline
 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
-					     unsigned long start_addr,
-					     unsigned long end_addr)
-{
-	struct vm_area_struct *vma = find_vma(mm, start_addr);
-
-	if (vma && end_addr <= vma->vm_start)
-		vma = NULL;
-	return vma;
-}
+			unsigned long start_addr, unsigned long end_addr);
 
 /**
  * vma_lookup() - Find a VMA at a specific address
diff --git a/mm/mmap.c b/mm/mmap.c
index 33b408653201..636a984602ad 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2103,6 +2103,35 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,

 EXPORT_SYMBOL(get_unmapped_area);
 
+/**
+ * find_vma_intersection() - Look up the first VMA which intersects the interval
+ * @mm: The process address space.
+ * @start_addr: The inclusive start user address.
+ * @end_addr: The exclusive end user address.
+ *
+ * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
+ * start_addr < end_addr.
+ */
+struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
+					     unsigned long start_addr,
+					     unsigned long end_addr)
+{
+	struct vm_area_struct *vma;
+	unsigned long index = start_addr;
+
+	mmap_assert_locked(mm);
+	/* Check the cache first. */
+	vma = vmacache_find(mm, start_addr);
+	if (likely(vma))
+		return vma;
+
+	vma = mt_find(&mm->mm_mt, &index, end_addr - 1);
+	if (vma)
+		vmacache_update(start_addr, vma);
+	return vma;
+}
+EXPORT_SYMBOL(find_vma_intersection);
+
 /**
  * find_vma() - Find the VMA for a given address, or the next VMA.
  * @mm: The mm_struct to check
diff --git a/mm/nommu.c b/mm/nommu.c
index f2031f865dbb..b098c0251137 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -642,6 +642,17 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 	vm_area_free(vma);
 }
 
+struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
+					     unsigned long start_addr,
+					     unsigned long end_addr)
+{
+	unsigned long index = start_addr;
+
+	mmap_assert_locked(mm);
+	return mt_find(&mm->mm_mt, &index, end_addr - 1);
+}
+EXPORT_SYMBOL(find_vma_intersection);
+
 /*
  * look up the first VMA in which addr resides, NULL if none
  * - should be called with mm->mmap_lock at least held readlocked
--
2.35.1