[RFC PATCH for 4.21 04/16] mm: Introduce vm_map_user_ram, vm_unmap_user_ram (v2)
From: Mathieu Desnoyers
Date: Thu Nov 01 2018 - 05:59:30 EST
Create and destroy mappings aliased to a user-space mapping with the same
cache coloring as the userspace mapping. Allow the kernel to load from
and store to pages shared with user-space through its own mapping in
kernel virtual addresses while ensuring cache conherency between kernel
kernel virtual addresses while ensuring cache coherency between kernel
and userspace mappings for virtually aliased architectures.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@xxxxxxxxxxxx>
Reviewed-by: Matthew Wilcox <mawilcox@xxxxxxxxxxxxx>
CC: "Paul E. McKenney" <paulmck@xxxxxxxxxxxxxxxxxx>
CC: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
CC: Paul Turner <pjt@xxxxxxxxxx>
CC: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
CC: Andy Lutomirski <luto@xxxxxxxxxxxxxx>
CC: Andi Kleen <andi@xxxxxxxxxxxxxx>
CC: Dave Watson <davejwatson@xxxxxx>
CC: Chris Lameter <cl@xxxxxxxxx>
CC: Ingo Molnar <mingo@xxxxxxxxxx>
CC: "H. Peter Anvin" <hpa@xxxxxxxxx>
CC: Ben Maurer <bmaurer@xxxxxx>
CC: Steven Rostedt <rostedt@xxxxxxxxxxx>
CC: Josh Triplett <josh@xxxxxxxxxxxxxxxx>
CC: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
CC: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
CC: Russell King <linux@xxxxxxxxxxxxxxxx>
CC: Catalin Marinas <catalin.marinas@xxxxxxx>
CC: Will Deacon <will.deacon@xxxxxxx>
CC: Michael Kerrisk <mtk.manpages@xxxxxxxxx>
CC: Boqun Feng <boqun.feng@xxxxxxxxx>
---
Changes since v1:
- Use WARN_ON() rather than BUG_ON().
---
include/linux/vmalloc.h | 4 +++
mm/vmalloc.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 70 insertions(+)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 398e9c95cd61..899657b3d469 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -59,6 +59,10 @@ struct vmap_area {
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count,
int node, pgprot_t prot);
+extern void vm_unmap_user_ram(const void *mem, unsigned int count);
+extern void *vm_map_user_ram(struct page **pages, unsigned int count,
+ unsigned long uaddr, int node, pgprot_t prot);
+
extern void vm_unmap_aliases(void);
#ifdef CONFIG_MMU
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a236bac872f0..8df3c572036c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1188,6 +1188,72 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
}
EXPORT_SYMBOL(vm_map_ram);
+/**
+ * vm_unmap_user_ram - unmap linear kernel address space set up by vm_map_user_ram
+ * @mem: the pointer returned by vm_map_user_ram
+ * @count: the count passed to that vm_map_user_ram call (cannot unmap partial)
+ */
+void vm_unmap_user_ram(const void *mem, unsigned int count)
+{
+ unsigned long size = (unsigned long)count << PAGE_SHIFT;
+ unsigned long addr = (unsigned long)mem;
+ struct vmap_area *va;
+
+ might_sleep(); /* unmapping may sleep; callers must not hold spinlocks */
+ if (WARN_ON(!addr) || /* reject NULL, out-of-range, or unaligned aliases */
+ WARN_ON(addr < VMALLOC_START) ||
+ WARN_ON(addr > VMALLOC_END) ||
+ WARN_ON(!PAGE_ALIGNED(addr)))
+ return;
+
+ debug_check_no_locks_freed(mem, size); /* lockdep: no live locks inside the range */
+ va = find_vmap_area(addr); /* look up the vmap_area containing addr */
+ if (WARN_ON(!va))
+ return;
+ free_unmap_vmap_area(va); /* unmap and release the whole underlying area */
+}
+EXPORT_SYMBOL(vm_unmap_user_ram);
+
+/**
+ * vm_map_user_ram - map user space pages linearly into kernel virtual address
+ * @pages: an array of pointers to the virtually contiguous pages to be mapped
+ * @count: number of pages
+ * @uaddr: address within the first page in the userspace mapping
+ * @node: prefer to allocate data structures on this node
+ * @prot: memory protection to use. PAGE_KERNEL for regular RAM
+ *
+ * Create a mapping aliased to a user-space mapping with the same cache
+ * coloring as the userspace mapping. Allow the kernel to load from and
+ * store to pages shared with user-space through its own mapping in kernel
+ * virtual addresses while ensuring cache coherency between kernel and
+ * userspace mappings for virtually aliased architectures.
+ *
+ * Returns: a pointer to the address that has been mapped, or %NULL on failure
+ */
+void *vm_map_user_ram(struct page **pages, unsigned int count,
+ unsigned long uaddr, int node, pgprot_t prot)
+{
+ unsigned long size = (unsigned long)count << PAGE_SHIFT;
+ unsigned long va_offset = ALIGN_DOWN(uaddr, PAGE_SIZE) & (SHMLBA - 1); /* cache color of the user page within SHMLBA */
+ unsigned long alloc_size = ALIGN(va_offset + size, SHMLBA); /* round up so the color-shifted mapping fits */
+ struct vmap_area *va;
+ unsigned long addr;
+ void *mem;
+
+ va = alloc_vmap_area(alloc_size, SHMLBA, VMALLOC_START, VMALLOC_END, /* SHMLBA-aligned so coloring below is well-defined */
+ node, GFP_KERNEL);
+ if (IS_ERR(va))
+ return NULL;
+ addr = va->va_start + va_offset; /* place the kernel alias at the same cache color as uaddr */
+ mem = (void *)addr;
+ if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
+ vm_unmap_user_ram(mem, count); /* tear down the vmap_area on mapping failure */
+ return NULL;
+ }
+ return mem;
+}
+EXPORT_SYMBOL(vm_map_user_ram);
+
static struct vm_struct *vmlist __initdata;
/**
* vm_area_add_early - add vmap area early during boot
--
2.11.0