Re: [v2] fs/proc/task_mmu: Implement IOCTL for efficient page table scanning

From: Muhammad Usama Anjum
Date: Mon Jul 24 2023 - 10:04:47 EST


Fixed the bugs found so far. Testing it further.

- Split the huge page and back off in the buffer-full case as well
- Fix wrongly breaking out of the loop if a page isn't interesting; skip it instead
- Untag the addresses and save them back into the struct
- Round up the end address to the next page boundary (see the small sketch below)
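
The end rounding is the usual round-up-to-page-boundary idiom. A minimal
userspace-style sketch of the same logic (4K pages assumed; the helper name
is only for illustration, not part of the patch):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Round a possibly unaligned end address up to the next page
     * boundary, mirroring the fixup done in pagemap_scan_get_args(). */
    static unsigned long round_end_to_page(unsigned long end)
    {
        return (end & ~PAGE_MASK) ? ((end & PAGE_MASK) + PAGE_SIZE) : end;
    }

    int main(void)
    {
        /* 0x1234 is mid-page, so it rounds up to 0x2000. */
        printf("%#lx -> %#lx\n", 0x1234UL, round_end_to_page(0x1234UL));
        /* An already page-aligned end is left untouched. */
        printf("%#lx -> %#lx\n", 0x2000UL, round_end_to_page(0x2000UL));
        return 0;
    }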

Signed-off-by: Muhammad Usama Anjum <usama.anjum@xxxxxxxxxxxxx>
---
fs/proc/task_mmu.c | 54 ++++++++++++++++++++++++++--------------------
1 file changed, 31 insertions(+), 23 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index add21fdf3c9a..64b326d0ec6d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1764,7 +1764,8 @@ struct pagemap_scan_private {
struct page_region __user* vec_out;
};

-static unsigned long pagemap_page_category(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+static unsigned long pagemap_page_category(struct vm_area_struct *vma,
+ unsigned long addr, pte_t pte)
{
unsigned long categories = 0;

@@ -1908,6 +1909,7 @@ static bool pagemap_scan_is_interesting_vma(unsigned long categories,
categories ^= p->arg.category_inverted;
if ((categories & required) != required)
return false;
+
return true;
}

@@ -1930,6 +1932,7 @@ static int pagemap_scan_test_walk(unsigned long start, unsigned long end,
return 1;

p->cur_vma_category = vma_category;
+
return 0;
}

@@ -1961,6 +1964,7 @@ static bool pagemap_scan_push_range(unsigned long categories,
cur_buf->start = addr;
cur_buf->end = end;
cur_buf->categories = categories;
+
return true;
}

@@ -1985,18 +1989,19 @@ static int pagemap_scan_output(unsigned long categories,
unsigned long n_pages, total_pages;
int ret = 0;

+ if (!p->vec_buf)
+ return 0;
+
if (!pagemap_scan_is_interesting_page(categories, p)) {
*end = addr;
return 0;
}

- if (!p->vec_buf)
- return 0;
-
categories &= p->arg.return_mask;

n_pages = (*end - addr) / PAGE_SIZE;
- if (check_add_overflow(p->found_pages, n_pages, &total_pages) || total_pages > p->arg.max_pages) {
+ if (check_add_overflow(p->found_pages, n_pages, &total_pages) ||
+ total_pages > p->arg.max_pages) {
size_t n_too_much = total_pages - p->arg.max_pages;
*end -= n_too_much * PAGE_SIZE;
n_pages -= n_too_much;
@@ -2012,6 +2017,7 @@ static int pagemap_scan_output(unsigned long categories,
p->found_pages += n_pages;
if (ret)
p->end_addr = *end;
+
return ret;
}

@@ -2044,7 +2050,7 @@ static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start,
* Break huge page into small pages if the WP operation
* need to be performed is on a portion of the huge page.
*/
- if (end != start + HPAGE_SIZE) {
+ if (end != start + HPAGE_SIZE || ret == -ENOSPC) {
spin_unlock(ptl);
split_huge_pmd(vma, pmd, start);
pagemap_scan_backout_range(p, start, end);
@@ -2066,8 +2072,8 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
{
struct pagemap_scan_private *p = walk->private;
struct vm_area_struct *vma = walk->vma;
+ unsigned long addr, categories, next;
pte_t *pte, *start_pte;
- unsigned long addr;
bool flush = false;
spinlock_t *ptl;
int ret;
@@ -2088,12 +2094,14 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
}

for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
- unsigned long categories = p->cur_vma_category |
- pagemap_page_category(vma, addr, ptep_get(pte));
- unsigned long next = addr + PAGE_SIZE;
+ categories = p->cur_vma_category |
+ pagemap_page_category(vma, addr, ptep_get(pte));
+ next = addr + PAGE_SIZE;

ret = pagemap_scan_output(categories, p, addr, &next);
- if (next == addr)
+ if (ret == 0 && next == addr)
+ continue;
+ else if (next == addr)
break;

if (~p->arg.flags & PM_SCAN_WP_MATCHING)
@@ -2175,7 +2183,7 @@ static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end,
{
struct pagemap_scan_private *p = walk->private;
struct vm_area_struct *vma = walk->vma;
- int ret;
+ int ret, err;

if (!vma)
return 0;
@@ -2187,7 +2195,7 @@ static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end,
if (~p->arg.flags & PM_SCAN_WP_MATCHING)
return ret;

- int err = uffd_wp_range(vma, addr, end - addr, true);
+ err = uffd_wp_range(vma, addr, end - addr, true);
if (err < 0)
ret = err;

@@ -2204,8 +2212,6 @@ static const struct mm_walk_ops pagemap_scan_ops = {
static int pagemap_scan_get_args(struct pm_scan_arg *arg,
unsigned long uarg)
{
- unsigned long start, end, vec;
-
if (copy_from_user(arg, (void __user *)uarg, sizeof(*arg)))
return -EFAULT;

@@ -2219,22 +2225,24 @@ static int pagemap_scan_get_args(struct pm_scan_arg *arg,
arg->category_anyof_mask | arg->return_mask) & ~PM_SCAN_CATEGORIES)
return -EINVAL;

- start = untagged_addr((unsigned long)arg->start);
- end = untagged_addr((unsigned long)arg->end);
- vec = untagged_addr((unsigned long)arg->vec);
+ arg->start = untagged_addr((unsigned long)arg->start);
+ arg->end = untagged_addr((unsigned long)arg->end);
+ arg->vec = untagged_addr((unsigned long)arg->vec);

/* Validate memory pointers */
- if (!IS_ALIGNED(start, PAGE_SIZE))
+ if (!IS_ALIGNED(arg->start, PAGE_SIZE))
return -EINVAL;
- if (!access_ok((void __user *)start, end - start))
+ if (!access_ok((void __user *)arg->start, arg->end - arg->start))
return -EFAULT;
- if (!vec && arg->vec_len)
+ if (!arg->vec && arg->vec_len)
return -EFAULT;
- if (vec && !access_ok((void __user *)vec,
+ if (arg->vec && !access_ok((void __user *)arg->vec,
arg->vec_len * sizeof(struct page_region)))
return -EFAULT;

/* Fixup default values */
+ arg->end = (arg->end & ~PAGE_MASK) ?
+ ((arg->end & PAGE_MASK) + PAGE_SIZE) : (arg->end);
if (!arg->max_pages)
arg->max_pages = ULONG_MAX;

@@ -2279,7 +2287,7 @@ static int pagemap_scan_init_bounce_buffer(struct pagemap_scan_private *p)
if (!p->vec_buf)
return -ENOMEM;

- p->vec_out = (void __user *)p->arg.vec;
+ p->vec_out = (struct page_region __user *)p->arg.vec;

return 0;
}
--
2.39.2