Re: drm pull for v5.3-rc1
From: Linus Torvalds
Date: Mon Jul 15 2019 - 18:18:07 EST
On Mon, Jul 15, 2019 at 1:07 PM Linus Torvalds
<torvalds@xxxxxxxxxxxxxxxxxxxx> wrote:
>
> The mm_walk struct is indeed a bit similar, and is in fact a bit
> problematic exactly because it mixes function pointers with non-const
> data.
This made me look at how nasty that would be to fix.
Not too bad.
The attached patch does add more lines than it removes, but in most
cases it's actually a clear improvement.
It results in:
- smaller stack frames and less runtime initialization: the bulk of
the 'mm_walk' structure was the ops pointers, and if we split them out
and make them const, we can just initialize them statically, and the
stack footprint of the callbacks becomes just a single word (the
'ops' pointer).
- the function pointers now end up in a nice const data section,
in addition to the whole "don't mix variable data with constants, and
don't put function pointers on the stack" thing (see the sketch below).
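To make the conversion pattern concrete, here's roughly what each
call site ends up looking like (a condensed sketch with made-up
handler and variable names, not an actual hunk from the patch):

	/* Callbacks live in a static const table: no runtime init,
	 * and the function pointers land in a const data section */
	static const struct mm_walk_ops ops = {
		.pmd_entry = my_pmd_handler,	/* placeholder handler */
	};

	/* The on-stack state just points at the const ops table */
	struct mm_walk walk = {
		.ops = &ops,
		.mm = mm,			/* placeholder mm_struct */
		.private = data,		/* placeholder private data */
	};

	walk_page_range(start, end, &walk);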
Of course, I haven't _tested_ the end result, but since it compiles it
must be perfect, right? Not that I build-tested all of it either, since
several of the mm_walk users are in other architectures.
I'm not sure this is really worth it, but I'm throwing the patch out
there in case somebody wants to look.
Andrew, comments? I don't think we have anybody who is in charge of
mm_walk outside of you...
Linus
arch/openrisc/kernel/dma.c | 10 ++++--
arch/powerpc/mm/book3s64/subpage_prot.c | 5 ++-
arch/s390/mm/gmap.c | 25 ++++++++++----
fs/proc/task_mmu.c | 40 +++++++++++++++-------
include/linux/mm.h | 23 +++++++++----
mm/hmm.c | 34 ++++++++++---------
mm/madvise.c | 10 ++++--
mm/memcontrol.c | 11 ++++--
mm/mempolicy.c | 5 ++-
mm/migrate.c | 20 +++++------
mm/mincore.c | 5 ++-
mm/mprotect.c | 5 ++-
mm/pagewalk.c | 60 +++++++++++++++++++--------------
13 files changed, 165 insertions(+), 88 deletions(-)
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index b41a79fcdbd9..1a69a66fe257 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -80,8 +80,11 @@ arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
{
unsigned long va;
void *page;
- struct mm_walk walk = {
+ static const struct mm_walk_ops ops = {
.pte_entry = page_set_nocache,
+ };
+ struct mm_walk walk = {
+ .ops = &ops,
.mm = &init_mm
};
@@ -111,8 +114,11 @@ arch_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long va = (unsigned long)vaddr;
- struct mm_walk walk = {
+ static const struct mm_walk_ops ops = {
.pte_entry = page_clear_nocache,
+ };
+ struct mm_walk walk = {
+ .ops = &ops,
.mm = &init_mm
};
diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c
index 9ba07e55c489..7876b316138b 100644
--- a/arch/powerpc/mm/book3s64/subpage_prot.c
+++ b/arch/powerpc/mm/book3s64/subpage_prot.c
@@ -143,9 +143,12 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
unsigned long len)
{
struct vm_area_struct *vma;
+ static const struct mm_walk_ops ops = {
+ .pmd_entry = subpage_walk_pmd_entry,
+ };
struct mm_walk subpage_proto_walk = {
+ .ops = &ops,
.mm = mm,
- .pmd_entry = subpage_walk_pmd_entry,
};
/*
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 1e668b95e0c6..9e0feeb469c2 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2523,9 +2523,14 @@ static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
static inline void zap_zero_pages(struct mm_struct *mm)
{
- struct mm_walk walk = { .pmd_entry = __zap_zero_pages };
+ static const struct mm_walk_ops ops = {
+ .pmd_entry = __zap_zero_pages,
+ };
+ struct mm_walk walk = {
+ .ops = &ops,
+ .mm = mm,
+ };
- walk.mm = mm;
walk_page_range(0, TASK_SIZE, &walk);
}
@@ -2591,11 +2596,15 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
int s390_enable_skey(void)
{
- struct mm_walk walk = {
+ static const struct mm_walk_ops ops = {
.hugetlb_entry = __s390_enable_skey_hugetlb,
.pte_entry = __s390_enable_skey_pte,
};
struct mm_struct *mm = current->mm;
+ struct mm_walk walk = {
+ .ops = &ops,
+ .mm = mm,
+ };
struct vm_area_struct *vma;
int rc = 0;
@@ -2614,7 +2623,6 @@ int s390_enable_skey(void)
}
mm->def_flags &= ~VM_MERGEABLE;
- walk.mm = mm;
walk_page_range(0, TASK_SIZE, &walk);
out_up:
@@ -2635,10 +2643,15 @@ static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
void s390_reset_cmma(struct mm_struct *mm)
{
- struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
+ static const struct mm_walk_ops ops = {
+ .pte_entry = __s390_reset_cmma,
+ };
+ struct mm_walk walk = {
+ .ops = &ops,
+ .mm = mm,
+ };
down_write(&mm->mmap_sem);
- walk.mm = mm;
walk_page_range(0, TASK_SIZE, &walk);
up_write(&mm->mmap_sem);
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 818cedbed95f..fb5710830ffc 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -729,16 +729,19 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
}
return 0;
}
+#else
+ #define smaps_hugetlb_range NULL
#endif /* HUGETLB_PAGE */
static void smap_gather_stats(struct vm_area_struct *vma,
struct mem_size_stats *mss)
{
- struct mm_walk smaps_walk = {
+ static const struct mm_walk_ops ops = {
.pmd_entry = smaps_pte_range,
-#ifdef CONFIG_HUGETLB_PAGE
.hugetlb_entry = smaps_hugetlb_range,
-#endif
+ };
+ struct mm_walk smaps_walk = {
+ .ops = &ops,
.mm = vma->vm_mm,
};
@@ -764,8 +767,13 @@ static void smap_gather_stats(struct vm_area_struct *vma,
!(vma->vm_flags & VM_WRITE)) {
mss->swap += shmem_swapped;
} else {
+ static const struct mm_walk_ops ops = {
+ .pmd_entry = smaps_pte_range,
+ .hugetlb_entry = smaps_hugetlb_range,
+ .pte_hole = smaps_pte_hole,
+ };
mss->check_shmem_swap = true;
- smaps_walk.pte_hole = smaps_pte_hole;
+ smaps_walk.ops = &ops;
}
}
#endif
@@ -1150,9 +1158,12 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
struct clear_refs_private cp = {
.type = type,
};
- struct mm_walk clear_refs_walk = {
+ static const struct mm_walk_ops ops = {
.pmd_entry = clear_refs_pte_range,
.test_walk = clear_refs_test_walk,
+ };
+ struct mm_walk clear_refs_walk = {
+ .ops = &ops,
.mm = mm,
.private = &cp,
};
@@ -1488,6 +1499,8 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
return err;
}
+#else
+ #define pagemap_hugetlb_range NULL
#endif /* HUGETLB_PAGE */
/*
@@ -1521,7 +1534,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
{
struct mm_struct *mm = file->private_data;
struct pagemapread pm;
- struct mm_walk pagemap_walk = {};
+ static const struct mm_walk_ops ops = {
+ .pmd_entry = pagemap_pmd_range,
+ .pte_hole = pagemap_pte_hole,
+ .hugetlb_entry = pagemap_hugetlb_range,
+ };
+ struct mm_walk pagemap_walk = { .ops = &ops, };
unsigned long src;
unsigned long svpfn;
unsigned long start_vaddr;
@@ -1549,11 +1567,6 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
if (!pm.buffer)
goto out_mm;
- pagemap_walk.pmd_entry = pagemap_pmd_range;
- pagemap_walk.pte_hole = pagemap_pte_hole;
-#ifdef CONFIG_HUGETLB_PAGE
- pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
-#endif
pagemap_walk.mm = mm;
pagemap_walk.private = &pm;
@@ -1808,9 +1821,12 @@ static int show_numa_map(struct seq_file *m, void *v)
struct numa_maps *md = &numa_priv->md;
struct file *file = vma->vm_file;
struct mm_struct *mm = vma->vm_mm;
- struct mm_walk walk = {
+ static const struct mm_walk_ops ops = {
.hugetlb_entry = gather_hugetlb_stats,
.pmd_entry = gather_pte_stats,
+ };
+ struct mm_walk walk = {
+ .ops = &ops,
.private = md,
.mm = mm,
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0389c34ac529..8133f24a3a28 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1426,8 +1426,10 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long start, unsigned long end);
+struct mm_walk;
+
/**
- * mm_walk - callbacks for walk_page_range
+ * mm_walk_ops - callbacks for walk_page_range
* @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
* this handler should only handle pud_trans_huge() puds.
* the pmd_entry or pte_entry callbacks will be used for
@@ -1444,13 +1446,8 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
* value means "do page table walk over the current vma,"
* and a negative one means "abort current page table walk
* right now." 1 means "skip the current vma."
- * @mm: mm_struct representing the target process of page table walk
- * @vma: vma currently walked (NULL if walking outside vmas)
- * @private: private data for callbacks' usage
- *
- * (see the comment on walk_page_range() for more details)
*/
-struct mm_walk {
+struct mm_walk_ops {
int (*pud_entry)(pud_t *pud, unsigned long addr,
unsigned long next, struct mm_walk *walk);
int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
@@ -1464,6 +1461,18 @@ struct mm_walk {
struct mm_walk *walk);
int (*test_walk)(unsigned long addr, unsigned long next,
struct mm_walk *walk);
+};
+
+/**
+ * mm_walk - walk_page_range data
+ * @mm: mm_struct representing the target process of page table walk
+ * @vma: vma currently walked (NULL if walking outside vmas)
+ * @private: private data for callbacks' usage
+ *
+ * (see the comment on walk_page_range() for more details)
+ */
+struct mm_walk {
+ const struct mm_walk_ops *ops;
struct mm_struct *mm;
struct vm_area_struct *vma;
void *private;
diff --git a/mm/hmm.c b/mm/hmm.c
index e1eedef129cf..756843ffa7cb 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -961,7 +961,15 @@ long hmm_range_snapshot(struct hmm_range *range)
struct hmm_vma_walk hmm_vma_walk;
struct hmm *hmm = range->hmm;
struct vm_area_struct *vma;
- struct mm_walk mm_walk;
+ static const struct mm_walk_ops ops = {
+ .pud_entry = hmm_vma_walk_pud,
+ .pmd_entry = hmm_vma_walk_pmd,
+ .pte_hole = hmm_vma_walk_hole,
+ .hugetlb_entry = hmm_vma_walk_hugetlb_entry,
+ };
+ struct mm_walk mm_walk = {
+ .ops = &ops,
+ };
lockdep_assert_held(&hmm->mm->mmap_sem);
do {
@@ -1004,13 +1012,6 @@ long hmm_range_snapshot(struct hmm_range *range)
mm_walk.vma = vma;
mm_walk.mm = vma->vm_mm;
- mm_walk.pte_entry = NULL;
- mm_walk.test_walk = NULL;
- mm_walk.hugetlb_entry = NULL;
- mm_walk.pud_entry = hmm_vma_walk_pud;
- mm_walk.pmd_entry = hmm_vma_walk_pmd;
- mm_walk.pte_hole = hmm_vma_walk_hole;
- mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;
walk_page_range(start, end, &mm_walk);
start = end;
@@ -1055,7 +1056,15 @@ long hmm_range_fault(struct hmm_range *range, bool block)
struct hmm_vma_walk hmm_vma_walk;
struct hmm *hmm = range->hmm;
struct vm_area_struct *vma;
- struct mm_walk mm_walk;
+ static const struct mm_walk_ops ops = {
+ .pud_entry = hmm_vma_walk_pud,
+ .pmd_entry = hmm_vma_walk_pmd,
+ .pte_hole = hmm_vma_walk_hole,
+ .hugetlb_entry = hmm_vma_walk_hugetlb_entry,
+ };
+ struct mm_walk mm_walk = {
+ .ops = &ops,
+ };
int ret;
lockdep_assert_held(&hmm->mm->mmap_sem);
@@ -1103,13 +1112,6 @@ long hmm_range_fault(struct hmm_range *range, bool block)
mm_walk.vma = vma;
mm_walk.mm = vma->vm_mm;
- mm_walk.pte_entry = NULL;
- mm_walk.test_walk = NULL;
- mm_walk.hugetlb_entry = NULL;
- mm_walk.pud_entry = hmm_vma_walk_pud;
- mm_walk.pmd_entry = hmm_vma_walk_pmd;
- mm_walk.pte_hole = hmm_vma_walk_hole;
- mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;
do {
ret = walk_page_range(start, end, &mm_walk);
diff --git a/mm/madvise.c b/mm/madvise.c
index 968df3aa069f..b9700060cafb 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -228,9 +228,12 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
static void force_swapin_readahead(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
+ static const struct mm_walk_ops ops = {
+ .pmd_entry = swapin_walk_pmd_entry,
+ };
struct mm_walk walk = {
+ .ops = &ops,
.mm = vma->vm_mm,
- .pmd_entry = swapin_walk_pmd_entry,
.private = vma,
};
@@ -444,8 +447,11 @@ static void madvise_free_page_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
- struct mm_walk free_walk = {
+ static const struct mm_walk_ops ops = {
.pmd_entry = madvise_free_pte_range,
+ };
+ struct mm_walk free_walk = {
+ .ops = &ops,
.mm = vma->vm_mm,
.private = tlb,
};
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 249671873aa9..6d3912b9e508 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5239,9 +5239,11 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
unsigned long precharge;
-
- struct mm_walk mem_cgroup_count_precharge_walk = {
+ static const struct mm_walk_ops ops = {
.pmd_entry = mem_cgroup_count_precharge_pte_range,
+ };
+ struct mm_walk mem_cgroup_count_precharge_walk = {
+ .ops = &ops,
.mm = mm,
};
down_read(&mm->mmap_sem);
@@ -5517,8 +5519,11 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
static void mem_cgroup_move_charge(void)
{
- struct mm_walk mem_cgroup_move_charge_walk = {
+ static const struct mm_walk_ops ops = {
.pmd_entry = mem_cgroup_move_charge_pte_range,
+ };
+ struct mm_walk mem_cgroup_move_charge_walk = {
+ .ops = &ops,
.mm = mc.mm,
};
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f48693f75b37..ca10c9b55333 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -652,10 +652,13 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
.nmask = nodes,
.prev = NULL,
};
- struct mm_walk queue_pages_walk = {
+ static const struct mm_walk_ops ops = {
.hugetlb_entry = queue_pages_hugetlb,
.pmd_entry = queue_pages_pte_range,
.test_walk = queue_pages_test_walk,
+ };
+ struct mm_walk queue_pages_walk = {
+ .ops = &ops,
.mm = mm,
.private = &qp,
};
diff --git a/mm/migrate.c b/mm/migrate.c
index 3445747e229d..0d1da0d72011 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2339,16 +2339,16 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
static void migrate_vma_collect(struct migrate_vma *migrate)
{
struct mmu_notifier_range range;
- struct mm_walk mm_walk;
-
- mm_walk.pmd_entry = migrate_vma_collect_pmd;
- mm_walk.pte_entry = NULL;
- mm_walk.pte_hole = migrate_vma_collect_hole;
- mm_walk.hugetlb_entry = NULL;
- mm_walk.test_walk = NULL;
- mm_walk.vma = migrate->vma;
- mm_walk.mm = migrate->vma->vm_mm;
- mm_walk.private = migrate;
+ static const struct mm_walk_ops ops = {
+ .pmd_entry = migrate_vma_collect_pmd,
+ .pte_hole = migrate_vma_collect_hole,
+ };
+ struct mm_walk mm_walk = {
+ .ops = &ops,
+ .vma = migrate->vma,
+ .mm = migrate->vma->vm_mm,
+ .private = migrate,
+ };
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm_walk.mm,
migrate->start,
diff --git a/mm/mincore.c b/mm/mincore.c
index 4fe91d497436..8195d2099e77 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -203,10 +203,13 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
struct vm_area_struct *vma;
unsigned long end;
int err;
- struct mm_walk mincore_walk = {
+ static const struct mm_walk_ops ops = {
.pmd_entry = mincore_pte_range,
.pte_hole = mincore_unmapped_range,
.hugetlb_entry = mincore_hugetlb,
+ };
+ struct mm_walk mincore_walk = {
+ .ops = &ops,
.private = vec,
};
diff --git a/mm/mprotect.c b/mm/mprotect.c
index bf38dfbbb4b4..719b5f4b9fe5 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -333,10 +333,13 @@ static int prot_none_walk(struct vm_area_struct *vma, unsigned long start,
unsigned long end, unsigned long newflags)
{
pgprot_t new_pgprot = vm_get_page_prot(newflags);
- struct mm_walk prot_none_walk = {
+ static const struct mm_walk_ops ops = {
.pte_entry = prot_none_pte_entry,
.hugetlb_entry = prot_none_hugetlb_entry,
.test_walk = prot_none_test,
+ };
+ struct mm_walk prot_none_walk = {
+ .ops = &ops,
.mm = current->mm,
.private = &new_pgprot,
};
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index c3084ff2569d..38ab762aef44 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -9,10 +9,11 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
{
pte_t *pte;
int err = 0;
+ const struct mm_walk_ops *ops = walk->ops;
pte = pte_offset_map(pmd, addr);
for (;;) {
- err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
+ err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
if (err)
break;
addr += PAGE_SIZE;
@@ -30,6 +31,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
{
pmd_t *pmd;
unsigned long next;
+ const struct mm_walk_ops *ops = walk->ops;
int err = 0;
pmd = pmd_offset(pud, addr);
@@ -37,8 +39,8 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
again:
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd) || !walk->vma) {
- if (walk->pte_hole)
- err = walk->pte_hole(addr, next, walk);
+ if (ops->pte_hole)
+ err = ops->pte_hole(addr, next, walk);
if (err)
break;
continue;
@@ -47,8 +49,8 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
* This implies that each ->pmd_entry() handler
* needs to know about pmd_trans_huge() pmds
*/
- if (walk->pmd_entry)
- err = walk->pmd_entry(pmd, addr, next, walk);
+ if (ops->pmd_entry)
+ err = ops->pmd_entry(pmd, addr, next, walk);
if (err)
break;
@@ -56,7 +58,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
* Check this here so we only break down trans_huge
* pages when we _need_ to
*/
- if (!walk->pte_entry)
+ if (!ops->pte_entry)
continue;
split_huge_pmd(walk->vma, pmd, addr);
@@ -75,6 +77,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
{
pud_t *pud;
unsigned long next;
+ const struct mm_walk_ops *ops = walk->ops;
int err = 0;
pud = pud_offset(p4d, addr);
@@ -82,18 +85,18 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
again:
next = pud_addr_end(addr, end);
if (pud_none(*pud) || !walk->vma) {
- if (walk->pte_hole)
- err = walk->pte_hole(addr, next, walk);
+ if (ops->pte_hole)
+ err = ops->pte_hole(addr, next, walk);
if (err)
break;
continue;
}
- if (walk->pud_entry) {
+ if (ops->pud_entry) {
spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);
if (ptl) {
- err = walk->pud_entry(pud, addr, next, walk);
+ err = ops->pud_entry(pud, addr, next, walk);
spin_unlock(ptl);
if (err)
break;
@@ -105,7 +108,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
if (pud_none(*pud))
goto again;
- if (walk->pmd_entry || walk->pte_entry)
+ if (ops->pmd_entry || ops->pte_entry)
err = walk_pmd_range(pud, addr, next, walk);
if (err)
break;
@@ -119,19 +122,20 @@ static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
{
p4d_t *p4d;
unsigned long next;
+ const struct mm_walk_ops *ops = walk->ops;
int err = 0;
p4d = p4d_offset(pgd, addr);
do {
next = p4d_addr_end(addr, end);
if (p4d_none_or_clear_bad(p4d)) {
- if (walk->pte_hole)
- err = walk->pte_hole(addr, next, walk);
+ if (ops->pte_hole)
+ err = ops->pte_hole(addr, next, walk);
if (err)
break;
continue;
}
- if (walk->pmd_entry || walk->pte_entry)
+ if (ops->pmd_entry || ops->pte_entry)
err = walk_pud_range(p4d, addr, next, walk);
if (err)
break;
@@ -145,19 +149,20 @@ static int walk_pgd_range(unsigned long addr, unsigned long end,
{
pgd_t *pgd;
unsigned long next;
+ const struct mm_walk_ops *ops = walk->ops;
int err = 0;
pgd = pgd_offset(walk->mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd)) {
- if (walk->pte_hole)
- err = walk->pte_hole(addr, next, walk);
+ if (ops->pte_hole)
+ err = ops->pte_hole(addr, next, walk);
if (err)
break;
continue;
}
- if (walk->pmd_entry || walk->pte_entry)
+ if (ops->pmd_entry || ops->pte_entry)
err = walk_p4d_range(pgd, addr, next, walk);
if (err)
break;
@@ -183,6 +188,7 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
unsigned long hmask = huge_page_mask(h);
unsigned long sz = huge_page_size(h);
pte_t *pte;
+ const struct mm_walk_ops *ops = walk->ops;
int err = 0;
do {
@@ -190,9 +196,9 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
pte = huge_pte_offset(walk->mm, addr & hmask, sz);
if (pte)
- err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
- else if (walk->pte_hole)
- err = walk->pte_hole(addr, next, walk);
+ err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
+ else if (ops->pte_hole)
+ err = ops->pte_hole(addr, next, walk);
if (err)
break;
@@ -220,9 +226,10 @@ static int walk_page_test(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
+ const struct mm_walk_ops *ops = walk->ops;
- if (walk->test_walk)
- return walk->test_walk(start, end, walk);
+ if (ops->test_walk)
+ return ops->test_walk(start, end, walk);
/*
* vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
@@ -234,8 +241,8 @@ static int walk_page_test(unsigned long start, unsigned long end,
*/
if (vma->vm_flags & VM_PFNMAP) {
int err = 1;
- if (walk->pte_hole)
- err = walk->pte_hole(start, end, walk);
+ if (ops->pte_hole)
+ err = ops->pte_hole(start, end, walk);
return err ? err : 1;
}
return 0;
@@ -248,7 +255,8 @@ static int __walk_page_range(unsigned long start, unsigned long end,
struct vm_area_struct *vma = walk->vma;
if (vma && is_vm_hugetlb_page(vma)) {
- if (walk->hugetlb_entry)
+ const struct mm_walk_ops *ops = walk->ops;
+ if (ops->hugetlb_entry)
err = walk_hugetlb_range(start, end, walk);
} else
err = walk_pgd_range(start, end, walk);
@@ -331,7 +339,7 @@ int walk_page_range(unsigned long start, unsigned long end,
if (err < 0)
break;
}
- if (walk->vma || walk->pte_hole)
+ if (walk->vma || walk->ops->pte_hole)
err = __walk_page_range(start, next, walk);
if (err)
break;