[PATCH 19/43] task_mmu: Convert to vma iterator
From: Liam Howlett
Date: Tue Nov 29 2022 - 11:46:05 EST
From: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx>
Use the vma iterator so that the iterator can be invalidated or
updated centrally, instead of each caller having to do so itself.
Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
---
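Note (not part of the patch): a minimal sketch of how the old maple
tree walk maps onto the vma iterator helpers, assuming the definitions
in include/linux/mm.h and include/linux/maple_tree.h. The function
names walk_vmas_old()/walk_vmas_new() are made up for illustration,
and the caller is assumed to hold the mmap lock.

#include <linux/maple_tree.h>
#include <linux/mm.h>

/* Before: open-coded maple tree state with explicit search limits. */
static void walk_vmas_old(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	MA_STATE(mas, &mm->mm_mt, 0, 0);

	mas_for_each(&mas, vma, ULONG_MAX) {
		/* per-VMA work, mmap lock held by the caller */
	}
}

/* After: the vma iterator wraps the same maple tree state. */
static void walk_vmas_new(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma) {
		/* per-VMA work, mmap lock held by the caller */
	}
}

Where show_smaps_rollup() drops and re-takes the mmap lock on
contention, the patch replaces mas_pause() with
vma_iter_set(&vmi, vma->vm_end), so the next vma_next() call resumes
at the first VMA after the one that was just processed.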
fs/proc/task_mmu.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 8a74cdcc9af0..1b0bb36c51b4 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -884,7 +884,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
struct vm_area_struct *vma;
unsigned long vma_start = 0, last_vma_end = 0;
int ret = 0;
- MA_STATE(mas, &mm->mm_mt, 0, 0);
+ VMA_ITERATOR(vmi, mm, 0);

priv->task = get_proc_task(priv->inode);
if (!priv->task)
@@ -902,7 +902,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
goto out_put_mm;

hold_task_mempolicy(priv);
- vma = mas_find(&mas, ULONG_MAX);
+ vma = vma_next(&vmi);

if (unlikely(!vma))
goto empty_set;
@@ -917,7 +917,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
* access it for write request.
*/
if (mmap_lock_is_contended(mm)) {
- mas_pause(&mas);
+ vma_iter_set(&vmi, vma->vm_end);
mmap_read_unlock(mm);
ret = mmap_read_lock_killable(mm);
if (ret) {
@@ -961,7 +961,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
* contains last_vma_end.
* Iterate VMA' from last_vma_end.
*/
- vma = mas_find(&mas, ULONG_MAX);
+ vma = vma_next(&vmi);
/* Case 3 above */
if (!vma)
break;
@@ -975,7 +975,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
smap_gather_stats(vma, &mss, last_vma_end);
}
/* Case 2 above */
- } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
+ } for_each_vma(vmi, vma);

empty_set:
show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
@@ -1271,7 +1271,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
return -ESRCH;
mm = get_task_mm(task);
if (mm) {
- MA_STATE(mas, &mm->mm_mt, 0, 0);
+ VMA_ITERATOR(vmi, mm, 0);
struct mmu_notifier_range range;
struct clear_refs_private cp = {
.type = type,
@@ -1291,7 +1291,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
}

if (type == CLEAR_REFS_SOFT_DIRTY) {
- mas_for_each(&mas, vma, ULONG_MAX) {
+ for_each_vma(vmi, vma) {
if (!(vma->vm_flags & VM_SOFTDIRTY))
continue;
vma->vm_flags &= ~VM_SOFTDIRTY;
--
2.35.1