[PATCH rfc -next 01/10] mm: add a generic VMA lock-based page fault handler

From: Kefeng Wang
Date: Thu Jul 13 2023 - 05:40:40 EST


More and more architectures have enabled ARCH_SUPPORTS_PER_VMA_LOCK,
e.g. x86, arm64, powerpc, s390 and riscv. Their implementations of the
VMA lock-based page fault path are very similar, which results in
duplicated code. Let's add a generic VMA lock-based page fault handler
to eliminate the duplication, which also makes it easy to enable the
feature on new architectures.
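
For example, with this in place an architecture's page fault handler
can be reduced to something along these lines (a sketch only: the
mm_flags/vm_flags values, the esr fault code and the retry/done labels
are placeholders that each architecture supplies itself):

	struct vm_locked_fault vmlf;
	vm_fault_t fault;

	VM_LOCKED_FAULT_INIT(vmlf, mm, addr, mm_flags, vm_flags, regs, esr);
	if (try_vma_locked_page_fault(&vmlf, &fault))
		goto retry;	/* VMA lock path unusable, take mmap_lock */
	if (!(fault & VM_FAULT_RETRY))
		goto done;	/* fault handled under the VMA lock */
	/* otherwise retry the fault with the mmap_lock held */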

Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
---
include/linux/mm.h | 30 ++++++++++++++++++++++++++++++
mm/memory.c        | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 84 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index c7886784832b..cba1b7b19c9d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -633,6 +633,15 @@ static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
#endif /* CONFIG_NUMA_BALANCING */

+struct vm_locked_fault {
+	struct mm_struct *mm;
+	unsigned long address;
+	unsigned int fault_flags;
+	unsigned long vm_flags;
+	struct pt_regs *regs;
+	unsigned long fault_code;
+};
+
#ifdef CONFIG_PER_VMA_LOCK
/*
* Try to read-lock a vma. The function is allowed to occasionally yield false
@@ -733,6 +742,21 @@ static inline void assert_fault_locked(struct vm_fault *vmf)
struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
					  unsigned long address);

+#define VM_LOCKED_FAULT_INIT(_name, _mm, _address, _fault_flags, _vm_flags, _regs, _fault_code) \
+do {								\
+	(_name).mm		= (_mm);			\
+	(_name).address		= (_address);			\
+	(_name).fault_flags	= (_fault_flags);		\
+	(_name).vm_flags	= (_vm_flags);			\
+	(_name).regs		= (_regs);			\
+	(_name).fault_code	= (_fault_code);		\
+} while (0)
+
+int arch_vma_check_access(struct vm_area_struct *vma,
+			  struct vm_locked_fault *vmlf);
+
+int try_vma_locked_page_fault(struct vm_locked_fault *vmlf, vm_fault_t *ret);
+
#else /* CONFIG_PER_VMA_LOCK */

static inline bool vma_start_read(struct vm_area_struct *vma)
@@ -742,6 +766,12 @@ static inline void vma_start_write(struct vm_area_struct *vma) {}
static inline void vma_assert_write_locked(struct vm_area_struct *vma) {}
static inline void vma_mark_detached(struct vm_area_struct *vma,
				     bool detached) {}
+#define VM_LOCKED_FAULT_INIT(_name, _mm, _address, _fault_flags, _vm_flags, _regs, _fault_code)
+static inline int try_vma_locked_page_fault(struct vm_locked_fault *vmlf,
+					    vm_fault_t *ret)
+{
+	return -EINVAL;
+}

static inline void release_fault_lock(struct vm_fault *vmf)
{
diff --git a/mm/memory.c b/mm/memory.c
index ad790394963a..d3f5d1270e7a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5449,6 +5449,60 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
	count_vm_vma_lock_event(VMA_LOCK_ABORT);
	return NULL;
}
+
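+/*
+ * Default check that the faulting VMA allows the required access;
+ * architectures may override this to also consult the fault code.
+ */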
+int __weak arch_vma_check_access(struct vm_area_struct *vma,
+				 struct vm_locked_fault *vmlf)
+{
+	if (!(vma->vm_flags & vmlf->vm_flags))
+		return -EINVAL;
+	return 0;
+}
+
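+/*
+ * Try to handle a user page fault with only the per-VMA lock held.
+ *
+ * Returns 0 if the fault was attempted, with the handle_mm_fault()
+ * result stored in *ret (VM_FAULT_RETRY means the caller must retry
+ * under the mmap_lock); -EINVAL if the caller must fall back to the
+ * mmap_lock path directly.
+ */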
+int try_vma_locked_page_fault(struct vm_locked_fault *vmlf, vm_fault_t *ret)
+{
+	struct vm_area_struct *vma;
+	vm_fault_t fault;
+
+	if (!(vmlf->fault_flags & FAULT_FLAG_USER))
+		return -EINVAL;
+
+	vma = lock_vma_under_rcu(vmlf->mm, vmlf->address);
+	if (!vma)
+		return -EINVAL;
+
+	if (arch_vma_check_access(vma, vmlf)) {
+		vma_end_read(vma);
+		return -EINVAL;
+	}
+
+	fault = handle_mm_fault(vma, vmlf->address,
+				vmlf->fault_flags | FAULT_FLAG_VMA_LOCK,
+				vmlf->regs);
+	*ret = fault;
+
+	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+		vma_end_read(vma);
+
+	if (fault & VM_FAULT_RETRY)
+		count_vm_vma_lock_event(VMA_LOCK_RETRY);
+	else
+		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+
+	return 0;
+}
+
#endif /* CONFIG_PER_VMA_LOCK */

#ifndef __PAGETABLE_P4D_FOLDED
--
2.27.0