[PATCH v2 21/24] perf: Further simplify perf_mmap()
From: Peter Zijlstra
Date: Wed Feb 05 2025 - 05:26:58 EST

Currently the rb and the AUX mapping branches each compute nr_pages from
vma_size and validate the size in their own way. Hoist that work into the
common path: compute nr_pages once, reject mappings that are not a whole
multiple of PAGE_SIZE or exceed INT_MAX pages, and set user_extra before
branching. The rb branch then only has to peel off the user page and keep
its power-of-two check.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
kernel/events/core.c | 21 ++++++++++-----------
1 file changed, 10 insertions(+), 11 deletions(-)

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6661,9 +6661,18 @@ static int perf_mmap(struct file *file,
return ret;

vma_size = vma->vm_end - vma->vm_start;
+ nr_pages = vma_size / PAGE_SIZE;
+
+ if (nr_pages > INT_MAX)
+ return -ENOMEM;
+
+ if (vma_size != PAGE_SIZE * nr_pages)
+ return -EINVAL;
+
+ user_extra = nr_pages;

if (vma->vm_pgoff == 0) {
- nr_pages = (vma_size / PAGE_SIZE) - 1;
+ nr_pages -= 1;

/*
* If we have rb pages ensure they're a power-of-two number, so we
@@ -6672,9 +6681,6 @@ static int perf_mmap(struct file *file,
if (nr_pages != 0 && !is_power_of_2(nr_pages))
return -EINVAL;

- if (vma_size != PAGE_SIZE * (1 + nr_pages))
- return -EINVAL;
-
WARN_ON_ONCE(event->ctx->parent_ctx);
again:
mutex_lock(&event->mmap_mutex);
@@ -6697,8 +6703,6 @@ static int perf_mmap(struct file *file,
rb = event->rb;
goto unlock;
}
-
- user_extra = nr_pages + 1;
} else {
/*
* AUX area mapping: if rb->aux_nr_pages != 0, it's already
@@ -6710,10 +6714,6 @@ static int perf_mmap(struct file *file,
if (!event->rb)
return -EINVAL;

- nr_pages = vma_size / PAGE_SIZE;
- if (nr_pages > INT_MAX)
- return -ENOMEM;
-
mutex_lock(&event->mmap_mutex);
ret = -EINVAL;

@@ -6757,7 +6757,6 @@ static int perf_mmap(struct file *file,
}

atomic_set(&rb->aux_mmap_count, 1);
- user_extra = nr_pages;
}

/* We need the rb to map pages. */
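
For anyone who wants to poke at the arithmetic, here is a minimal userspace
sketch of the checks that end up in front of the branch, plus the rb branch's
power-of-two rule. PAGE_SIZE, is_power_of_2() and the error values mimic the
kernel ones; the helper perf_mmap_size_check() and the standalone framing are
made up for illustration only, this is not kernel API.

/* Illustration only -- not kernel code. */
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* pgoff == 0 selects the rb (data) mapping, anything else the AUX area. */
static int perf_mmap_size_check(unsigned long vma_size, unsigned long pgoff)
{
	unsigned long nr_pages = vma_size / PAGE_SIZE;
	unsigned long user_extra;

	/* Common path: done once, before the rb/AUX branch. */
	if (nr_pages > INT_MAX)
		return -ENOMEM;
	if (vma_size != PAGE_SIZE * nr_pages)
		return -EINVAL;

	user_extra = nr_pages;

	if (pgoff == 0) {
		/* rb mapping: one user page + a power-of-two number of data pages. */
		nr_pages -= 1;
		if (nr_pages != 0 && !is_power_of_2(nr_pages))
			return -EINVAL;
	}

	printf("vma_size=%lu -> nr_pages=%lu user_extra=%lu\n",
	       vma_size, nr_pages, user_extra);
	return 0;
}

int main(void)
{
	/* 1 user page + 4 data pages: accepted. */
	printf("ret=%d\n", perf_mmap_size_check(5 * PAGE_SIZE, 0));
	/* 1 user page + 3 data pages: rejected, not a power of two. */
	printf("ret=%d\n", perf_mmap_size_check(4 * PAGE_SIZE, 0));
	/* Not a whole number of pages: rejected. */
	printf("ret=%d\n", perf_mmap_size_check(PAGE_SIZE + 123, 0));
	return 0;
}

Doing the division and the INT_MAX / page-multiple checks up front means both
the rb and the AUX paths see the same validated nr_pages and user_extra, which
is what lets the duplicated checks above go away.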