[PATCH 31/31] powerpc/mm: attempt speculative mm faults first

From: Michel Lespinasse
Date: Fri Apr 30 2021 - 15:54:20 EST


Attempt speculative mm fault handling first, and fall back to the
existing (non-speculative) code if that fails.

This follows the lines of the x86 speculative fault handling code,
with some minor arch differences such as the way the
access_pkey_error case is handled.

Signed-off-by: Michel Lespinasse <michel@xxxxxxxxxxxxxx>
---
arch/powerpc/mm/fault.c | 65 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 65 insertions(+)
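
For reviewers, the control flow added below is roughly the following
(an annotated sketch only, not part of the patch; the helpers
mmap_seq_read_start()/mmap_seq_read_check(), do_handle_mm_fault() and
FAULT_FLAG_SPECULATIVE come from earlier patches in this series, and
the fragment leaves out the access/pkey permission checks, event
counters and signal handling that the real code performs):

	seq = mmap_seq_read_start(mm);	/* snapshot; odd means a writer is active */
	if (seq & 1)
		goto spf_abort;
	rcu_read_lock();
	vma = find_vma(mm, address);	/* vma lookup without taking mmap_lock */
	if (!vma || vma->vm_start > address || !vma_is_anonymous(vma)) {
		rcu_read_unlock();
		goto spf_abort;
	}
	pvma = *vma;			/* work on a private on-stack copy of the vma */
	rcu_read_unlock();
	if (!mmap_seq_read_check(mm, seq, SPF_ABORT_VMA_COPY))
		goto spf_abort;		/* mmap changed under us, give up */
	fault = do_handle_mm_fault(&pvma, address,
				   flags | FAULT_FLAG_SPECULATIVE, seq, regs);
	if (!(fault & VM_FAULT_RETRY))
		goto done;		/* fault handled speculatively */
spf_abort:
	count_vm_event(SPF_ABORT);
	/* ... fall through to the existing mmap_lock protected path ... */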

diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index bb368257b55c..d7c820751a58 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -398,6 +398,10 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 	int is_write = page_fault_is_write(error_code);
 	vm_fault_t fault, major = 0;
 	bool kprobe_fault = kprobe_page_fault(regs, 11);
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	struct vm_area_struct pvma;
+	unsigned long seq;
+#endif
 
 	if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
 		return 0;
@@ -450,6 +454,64 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (is_exec)
 		flags |= FAULT_FLAG_INSTRUCTION;
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+
+	/*
+	 * No need to try speculative faults for kernel or
+	 * single threaded user space.
+	 */
+	if (!(flags & FAULT_FLAG_USER) || atomic_read(&mm->mm_users) == 1)
+		goto no_spf;
+
+	count_vm_event(SPF_ATTEMPT);
+	seq = mmap_seq_read_start(mm);
+	if (seq & 1) {
+		count_vm_spf_event(SPF_ABORT_ODD);
+		goto spf_abort;
+	}
+	rcu_read_lock();
+	vma = find_vma(mm, address);
+	if (!vma || vma->vm_start > address) {
+		rcu_read_unlock();
+		count_vm_spf_event(SPF_ABORT_UNMAPPED);
+		goto spf_abort;
+	}
+	if (!vma_is_anonymous(vma)) {
+		rcu_read_unlock();
+		count_vm_spf_event(SPF_ABORT_NO_SPECULATE);
+		goto spf_abort;
+	}
+	pvma = *vma;
+	rcu_read_unlock();
+	if (!mmap_seq_read_check(mm, seq, SPF_ABORT_VMA_COPY))
+		goto spf_abort;
+	vma = &pvma;
+#ifdef CONFIG_PPC_MEM_KEYS
+	if (unlikely(access_pkey_error(is_write, is_exec,
+				       (error_code & DSISR_KEYFAULT), vma))) {
+		count_vm_spf_event(SPF_ABORT_ACCESS_ERROR);
+		goto spf_abort;
+	}
+#endif /* CONFIG_PPC_MEM_KEYS */
+	if (unlikely(access_error(is_write, is_exec, vma))) {
+		count_vm_spf_event(SPF_ABORT_ACCESS_ERROR);
+		goto spf_abort;
+	}
+	fault = do_handle_mm_fault(vma, address,
+				   flags | FAULT_FLAG_SPECULATIVE, seq, regs);
+	major |= fault & VM_FAULT_MAJOR;
+
+	if (fault_signal_pending(fault, regs))
+		return user_mode(regs) ? 0 : SIGBUS;
+	if (!(fault & VM_FAULT_RETRY))
+		goto done;
+
+spf_abort:
+	count_vm_event(SPF_ABORT);
+no_spf:
+
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space. All other faults represent errors in the
 	 * kernel and should generate an OOPS. Unfortunately, in the case of an
@@ -525,6 +587,9 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 
 	mmap_read_unlock(current->mm);
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+done:
+#endif
 
 	if (unlikely(fault & VM_FAULT_ERROR))
 		return mm_fault_error(regs, address, fault);
--
2.20.1