Convert handle_pte_fault() to use ptep_get_lockless_norecency() instead of
ptep_get_lockless() when saving orig_pte.
There are a number of places that follow this pattern:
orig_pte = ptep_get_lockless(ptep)
...
<lock>
if (!pte_same(orig_pte, ptep_get(ptep)))
// RACE!
...
<unlock>
Since ptep_get_lockless_norecency() does not reliably return the access and
dirty bits, we need to be careful to convert all such comparisons to use
pte_same_norecency() so that the access and dirty bits are excluded from
the comparison.
Additionally, there are a couple of places that genuinely rely on the
access and dirty bits of orig_pte; but with some careful refactoring, we
can use ptep_get() once we are holding the lock to achieve equivalent
logic.
@@ -5343,7 +5356,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
vmf->address, &vmf->ptl);
if (unlikely(!vmf->pte))
return 0;
- vmf->orig_pte = ptep_get_lockless(vmf->pte);
+ vmf->orig_pte = ptep_get_lockless_norecency(vmf->pte);
vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
if (pte_none(vmf->orig_pte)) {
@@ -5363,7 +5376,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
spin_lock(vmf->ptl);
entry = vmf->orig_pte;
- if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
+ if (unlikely(!pte_same_norecency(ptep_get(vmf->pte), entry))) {
update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
goto unlock;