Re: [PATCH v8 03/23] mm: Check against orig_pte for finish_fault()
From: Andrew Morton
Date: Thu Apr 14 2022 - 16:57:49 EST
On Thu, 14 Apr 2022 12:30:06 -0400 Peter Xu <peterx@xxxxxxxxxx> wrote:
> > Reported-by: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
> >
> > Tested-by: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
>
> Thanks, Marek, for the fast feedback!
Certainly.
> I've also verified it for the uffd-wp case, so the whole series keeps
> running as usual and nothing else shows up with the new patch in place.
>
> Andrew, any suggestion on how we proceed with the replacement patch?
> E.g. do you want me to post it separately to the list?
I turned it into an incremental diff and queued it against [03/23]:
--- a/include/linux/mm_types.h~mm-check-against-orig_pte-for-finish_fault-fix
+++ a/include/linux/mm_types.h
@@ -814,6 +814,8 @@ typedef struct {
* @FAULT_FLAG_UNSHARE: The fault is an unsharing request to unshare (and mark
* exclusive) a possibly shared anonymous page that is
* mapped R/O.
+ * @FAULT_FLAG_ORIG_PTE_VALID: whether the fault has vmf->orig_pte cached.
+ * We should only access orig_pte if this flag is set.
*
* About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
* whether we would allow page faults to retry by specifying these two
@@ -850,6 +852,7 @@ enum fault_flag {
FAULT_FLAG_INSTRUCTION = 1 << 8,
FAULT_FLAG_INTERRUPTIBLE = 1 << 9,
FAULT_FLAG_UNSHARE = 1 << 10,
+ FAULT_FLAG_ORIG_PTE_VALID = 1 << 11,
};
#endif /* _LINUX_MM_TYPES_H */
--- a/mm/memory.c~mm-check-against-orig_pte-for-finish_fault-fix
+++ a/mm/memory.c
@@ -4194,6 +4194,14 @@ void do_set_pte(struct vm_fault *vmf, st
set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
}
+static bool vmf_pte_changed(struct vm_fault *vmf)
+{
+	if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
+		return !pte_same(*vmf->pte, vmf->orig_pte);
+
+ return !pte_none(*vmf->pte);
+}
+
/**
* finish_fault - finish page fault once we have prepared the page to fault
*
@@ -4252,7 +4260,7 @@ vm_fault_t finish_fault(struct vm_fault
vmf->address, &vmf->ptl);
ret = 0;
/* Re-check under ptl */
- if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
+ if (likely(!vmf_pte_changed(vmf)))
do_set_pte(vmf, page, vmf->address);
else
ret = VM_FAULT_NOPAGE;
@@ -4720,13 +4728,7 @@ static vm_fault_t handle_pte_fault(struc
* concurrent faults and from rmap lookups.
*/
vmf->pte = NULL;
- /*
- * Always initialize orig_pte. This matches with below
- * code to have orig_pte to be the none pte if pte==NULL.
- * This makes the rest code to be always safe to reference
- * it, e.g. in finish_fault() we'll detect pte changes.
- */
- pte_clear(vmf->vma->vm_mm, vmf->address, &vmf->orig_pte);
+ vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
} else {
/*
* If a huge pmd materialized under us just retry later. Use
@@ -4750,6 +4752,7 @@ static vm_fault_t handle_pte_fault(struc
*/
vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
vmf->orig_pte = *vmf->pte;
+ vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
/*
* some architectures can have larger ptes than wordsize,
_
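
A side note for anyone reading along: the point of the new flag is that
vmf->orig_pte is only a meaningful snapshot when handle_pte_fault()
actually read the pte, so the re-check under the ptl in finish_fault()
must not compare against a stale or uninitialized value. Below is a
minimal userspace sketch of that guard logic; the toy_* names and the
uint64_t ptes are stand-ins invented purely for illustration, not the
kernel's types or API:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define FAULT_FLAG_ORIG_PTE_VALID (1u << 11)

	/* Toy stand-in for struct vm_fault, illustration only. */
	struct toy_vm_fault {
		unsigned int flags;
		uint64_t orig_pte;	/* snapshot taken at fault time */
		uint64_t *pte;		/* current pte, re-read later */
	};

	static bool toy_pte_same(uint64_t a, uint64_t b) { return a == b; }
	static bool toy_pte_none(uint64_t pte) { return pte == 0; }

	/* Mirrors the vmf_pte_changed() logic in the diff above. */
	static bool toy_vmf_pte_changed(struct toy_vm_fault *vmf)
	{
		/* Only trust orig_pte when the snapshot was taken. */
		if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
			return !toy_pte_same(*vmf->pte, vmf->orig_pte);

		/* No snapshot: any populated pte means we raced. */
		return !toy_pte_none(*vmf->pte);
	}

	int main(void)
	{
		uint64_t pte = 0;
		struct toy_vm_fault vmf = { .flags = 0, .pte = &pte };

		printf("%d\n", toy_vmf_pte_changed(&vmf));  /* 0: still none */
		pte = 0x1000;  /* a concurrent fault populated the pte */
		printf("%d\n", toy_vmf_pte_changed(&vmf));  /* 1: changed */

		vmf.flags |= FAULT_FLAG_ORIG_PTE_VALID;
		vmf.orig_pte = 0x1000;
		printf("%d\n", toy_vmf_pte_changed(&vmf));  /* 0: matches */
		return 0;
	}

The fallback to the pte_none() check preserves the old expectation for
the no-snapshot case: a freshly allocated page table contains only none
ptes, so any populated entry means another fault got there first and the
caller returns VM_FAULT_NOPAGE.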