[PATCH 18/19] perf: Lift event->mmap_mutex in perf_mmap()
From: Peter Zijlstra
Date: Mon Nov 04 2024 - 08:59:37 EST
This puts 'all' of perf_mmap() under a single event->mmap_mutex section.
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
kernel/events/core.c | 20 ++++++++------------
1 file changed, 8 insertions(+), 12 deletions(-)
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6626,7 +6626,7 @@ static int perf_mmap(struct file *file,
unsigned long vma_size;
unsigned long nr_pages;
long user_extra = 0, extra = 0;
- int ret = 0, flags = 0;
+ int ret, flags = 0;
/*
* Don't allow mmap() of inherited per-task counters. This would
@@ -6654,6 +6654,9 @@ static int perf_mmap(struct file *file,
user_extra = nr_pages;
+ mutex_lock(&event->mmap_mutex);
+ ret = -EINVAL;
+
if (vma->vm_pgoff == 0) {
nr_pages -= 1;
@@ -6662,16 +6665,13 @@ static int perf_mmap(struct file *file,
* can do bitmasks instead of modulo.
*/
if (nr_pages != 0 && !is_power_of_2(nr_pages))
- return -EINVAL;
+ goto unlock;
WARN_ON_ONCE(event->ctx->parent_ctx);
- mutex_lock(&event->mmap_mutex);
if (event->rb) {
- if (data_page_nr(event->rb) != nr_pages) {
- ret = -EINVAL;
+ if (data_page_nr(event->rb) != nr_pages)
goto unlock;
- }
if (atomic_inc_not_zero(&event->rb->mmap_count)) {
/*
@@ -6698,12 +6698,6 @@ static int perf_mmap(struct file *file,
*/
u64 aux_offset, aux_size;
- if (!event->rb)
- return -EINVAL;
-
- mutex_lock(&event->mmap_mutex);
- ret = -EINVAL;
-
rb = event->rb;
if (!rb)
goto aux_unlock;
@@ -6813,6 +6807,8 @@ static int perf_mmap(struct file *file,
rb->aux_mmap_locked = extra;
}
+ ret = 0;
+
unlock:
if (!ret) {
atomic_long_add(user_extra, &user->locked_vm);