huge_pmd_set_accessed() is only called by __handle_mm_fault() in
mm/memory.c.  Move the definition to memory.c and make it static, like
create_huge_pmd() and wp_huge_pmd().
Signed-off-by: Yang Shi <yang.shi@xxxxxxxxxx>
---
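For reference, the sole caller is the pmd_trans_huge() path in
__handle_mm_fault(), which goes roughly like this (paraphrased, not
verbatim; the exact surrounding checks may differ):

	if (dirty && !pmd_write(orig_pmd)) {
		ret = wp_huge_pmd(...);
		if (!(ret & VM_FAULT_FALLBACK))
			return ret;
	} else {
		/* read fault, or pmd already writable: just mark it young */
		huge_pmd_set_accessed(mm, vma, address, pmd,
				      orig_pmd, dirty);
		return 0;
	}

A side benefit of the move is that huge_pmd_set_accessed() now has
internal linkage, so the compiler is free to inline it into
__handle_mm_fault().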
 include/linux/huge_mm.h |  4 ----
 mm/huge_memory.c        | 23 -----------------------
 mm/memory.c             | 23 +++++++++++++++++++++++
 3 files changed, 23 insertions(+), 27 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 7008623..c218ab7b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -8,10 +8,6 @@ extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *vma);
-extern void huge_pmd_set_accessed(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmd,
- pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
pmd_t orig_pmd);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index fecbbc5..6c14cb6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1137,29 +1137,6 @@ out:
return ret;
 }
 
-void huge_pmd_set_accessed(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmd, pmd_t orig_pmd,
- int dirty)
-{
- spinlock_t *ptl;
- pmd_t entry;
- unsigned long haddr;
-
- ptl = pmd_lock(mm, pmd);
- if (unlikely(!pmd_same(*pmd, orig_pmd)))
- goto unlock;
-
- entry = pmd_mkyoung(orig_pmd);
- haddr = address & HPAGE_PMD_MASK;
- if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
- update_mmu_cache_pmd(vma, address, pmd);
-
-unlock:
- spin_unlock(ptl);
-}
-
static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long address,
diff --git a/mm/memory.c b/mm/memory.c
index 93897f2..6ced4eb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3287,6 +3287,29 @@ static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
return VM_FAULT_FALLBACK;
 }
 
+static void huge_pmd_set_accessed(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmd, pmd_t orig_pmd,
+ int dirty)
+{
+ spinlock_t *ptl;
+ pmd_t entry;
+ unsigned long haddr;
+
+ ptl = pmd_lock(mm, pmd);
+ if (unlikely(!pmd_same(*pmd, orig_pmd)))
+ goto unlock;
+
+ entry = pmd_mkyoung(orig_pmd);
+ haddr = address & HPAGE_PMD_MASK;
+ if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
+ update_mmu_cache_pmd(vma, address, pmd);
+
+unlock:
+ spin_unlock(ptl);
+}
+
/*
* These routines also need to handle stuff like marking pages dirty
* and/or accessed for architectures that don't do it in hardware (most