[PATCH v3 12/20] mm: Introduce __vm_normal_page()

From: Laurent Dufour
Date: Fri Sep 08 2017 - 14:10:20 EST


When dealing with the speculative fault path, we should use the VMA's
vm_flags value cached in the vm_fault structure rather than the one read
through the VMA pointer.

Currently, vm_normal_page() uses the VMA pointer to fetch the vm_flags
value. This patch introduces __vm_normal_page(), which receives the
vm_flags value as a parameter instead.
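
For illustration, a minimal caller sketch (assuming the vma_flags
snapshot added to the vm_fault structure earlier in this series):

	struct page *page;

	/* Regular path: vm_flags are read through the VMA pointer. */
	page = vm_normal_page(vma, vmf->address, vmf->orig_pte);

	/*
	 * Speculative path: use the vm_flags value snapshotted in the
	 * vm_fault structure, as vma->vm_flags may change under us.
	 */
	page = __vm_normal_page(vma, vmf->address, vmf->orig_pte, false,
				vmf->vma_flags);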

Note: the speculative path is only enabled on architectures providing
support for the special PTE flag (HAVE_PTE_SPECIAL), so only the first
block of __vm_normal_page() is exercised during the speculative path.
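
To make that concrete, here is a condensed sketch of that first block,
mirroring the hunks below (HAVE_PTE_SPECIAL is a compile-time constant
in this version of mm/memory.c):

	if (HAVE_PTE_SPECIAL) {
		/* Only this block can run on the speculative path. */
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_ops && vma->vm_ops->find_special_page)
			return vma->vm_ops->find_special_page(vma, addr);
		/* Check the passed-in vma_flags, not vma->vm_flags. */
		if (vma_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (is_zero_pfn(pfn))
			return NULL;
		...
	}
	/* The !HAVE_PTE_SPECIAL fallback is never reached speculatively. */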

Signed-off-by: Laurent Dufour <ldufour@xxxxxxxxxxxxxxxxxx>
---
include/linux/mm.h |  7 +++++--
mm/memory.c        | 18 ++++++++++--------
2 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index bb0c87f1c725..a2857aaa03f1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1246,8 +1246,11 @@ struct zap_details {
pgoff_t last_index; /* Highest page->index to unmap */
};

-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
- pte_t pte, bool with_public_device);
+struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte, bool with_public_device,
+ unsigned long vma_flags);
+#define _vm_normal_page(vma, addr, pte, with_public_device) \
+ __vm_normal_page(vma, addr, pte, with_public_device, (vma)->vm_flags)
#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)

struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index c306f1f64c9e..a5b5fe833ed3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -822,8 +822,9 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
#else
# define HAVE_PTE_SPECIAL 0
#endif
-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
- pte_t pte, bool with_public_device)
+struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte, bool with_public_device,
+ unsigned long vma_flags)
{
unsigned long pfn = pte_pfn(pte);

@@ -832,7 +833,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
goto check_pfn;
if (vma->vm_ops && vma->vm_ops->find_special_page)
return vma->vm_ops->find_special_page(vma, addr);
- if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+ if (vma_flags & (VM_PFNMAP | VM_MIXEDMAP))
return NULL;
if (is_zero_pfn(pfn))
return NULL;
@@ -864,8 +865,8 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,

/* !HAVE_PTE_SPECIAL case follows: */

- if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
- if (vma->vm_flags & VM_MIXEDMAP) {
+ if (unlikely(vma_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+ if (vma_flags & VM_MIXEDMAP) {
if (!pfn_valid(pfn))
return NULL;
goto out;
@@ -874,7 +875,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
off = (addr - vma->vm_start) >> PAGE_SHIFT;
if (pfn == vma->vm_pgoff + off)
return NULL;
- if (!is_cow_mapping(vma->vm_flags))
+ if (!is_cow_mapping(vma_flags))
return NULL;
}
}
@@ -2687,7 +2688,8 @@ static int do_wp_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;

- vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
+ vmf->page = __vm_normal_page(vma, vmf->address, vmf->orig_pte, false,
+ vmf->vma_flags);
if (!vmf->page) {
/*
* VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
@@ -3749,7 +3751,7 @@ static int do_numa_page(struct vm_fault *vmf)
ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte);
update_mmu_cache(vma, vmf->address, vmf->pte);

- page = vm_normal_page(vma, vmf->address, pte);
+ page = __vm_normal_page(vma, vmf->address, pte, false, vmf->vma_flags);
if (!page) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
return 0;
--
2.7.4