[PATCH v2] perf/ring_buffer: Fix exposing a temporarily decreased data_head.

From: Yabin Cui
Date: Thu May 16 2019 - 14:42:36 EST


In perf_output_put_handle(), an IRQ/NMI can happen at the location below and
write records to the same ring buffer:
...
local_dec_and_test(&rb->nest)
... <-- an IRQ/NMI can happen here
rb->user_page->data_head = head;
...

In this case, the IRQ/NMI handler writes a value A to data_head, and after
the IRQ returns the interrupted writer stores the stale value B it read
earlier, where A > B. As a result, data_head temporarily decreases from A to
B, and a reader polling the buffer frequently enough may observe
data_head < data_tail, which leads to unexpected behavior.
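
For illustration only -- readable_bytes() below is a hypothetical userspace
helper, not code from this patch or from the perf tooling -- this is roughly
how a reader trips over a backwards-moving head:

	#include <linux/perf_event.h>
	#include <stdint.h>

	/*
	 * A reader of the mmap()ed ring buffer computes the readable bytes
	 * as data_head - data_tail. If data_head is ever observed below
	 * data_tail, the unsigned subtraction wraps and the reader thinks
	 * an enormous amount of data is available.
	 */
	static uint64_t readable_bytes(struct perf_event_mmap_page *pc)
	{
		/* acquire-load of the kernel-written head (reader side of
		 * the B/C pairing in the kernel comment) */
		uint64_t head = __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
		uint64_t tail = pc->data_tail;	/* only the reader writes data_tail */

		return head - tail;	/* "huge" if head < tail */
	}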

This can be fixed by moving the decrement of rb->nest to after the data_head
update. An IRQ/NMI hitting in that window then sees a non-zero nest count and
does not touch data_head itself; the head it advanced is picked up by the
existing re-check of rb->head.
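
Roughly, with this patch an IRQ/NMI that lands between the data_head update
and the clearing of rb->nest behaves as sketched below (illustrative
interleaving, not verbatim code):

	outermost writer			nested IRQ/NMI writer
	----------------			---------------------
	head = local_read(&rb->head)
	...
	WRITE_ONCE(user_page->data_head, head)
						rb->nest++	/* nest == 2 */
						writes records, advances rb->head
						sees rb->nest > 1: rb->nest--, goto out
	WRITE_ONCE(rb->nest, 0)
	barrier()
	sees head != local_read(&rb->head):
	  rb->nest++, goto again	/* publishes the newer head */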

Signed-off-by: Yabin Cui <yabinc@xxxxxxxxxx>
---

v1 -> v2: change rb->nest from local_t to unsigned int, and add barriers.

---
kernel/events/internal.h | 2 +-
kernel/events/ring_buffer.c | 24 ++++++++++++++++++------
2 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 79c47076700a..0a8c003b9bcf 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -24,7 +24,7 @@ struct ring_buffer {
 	atomic_t poll; /* POLL_ for wakeups */
 
 	local_t head; /* write position */
-	local_t nest; /* nested writers */
+	unsigned int nest; /* nested writers */
 	local_t events; /* event limit */
 	local_t wakeup; /* wakeup stamp */
 	local_t lost; /* nr records lost */
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 674b35383491..c677beb01fb1 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -38,7 +38,8 @@ static void perf_output_get_handle(struct perf_output_handle *handle)
 	struct ring_buffer *rb = handle->rb;
 
 	preempt_disable();
-	local_inc(&rb->nest);
+	rb->nest++;
+	barrier();
 	handle->wakeup = local_read(&rb->wakeup);
 }
 
@@ -54,8 +55,10 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
 	 * IRQ/NMI can happen here, which means we can miss a head update.
 	 */
 
-	if (!local_dec_and_test(&rb->nest))
+	if (rb->nest > 1) {
+		rb->nest--;
 		goto out;
+	}
 
 	/*
 	 * Since the mmap() consumer (userspace) can run on a different CPU:
@@ -84,14 +87,23 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
 	 * See perf_output_begin().
 	 */
 	smp_wmb(); /* B, matches C */
-	rb->user_page->data_head = head;
+	WRITE_ONCE(rb->user_page->data_head, head);
+
+	/*
+	 * Clear rb->nest after updating data_head. This prevents IRQ/NMI from
+	 * updating data_head before us. If that happens, we will expose a
+	 * temporarily decreased data_head.
+	 */
+	WRITE_ONCE(rb->nest, 0);
 
 	/*
-	 * Now check if we missed an update -- rely on previous implied
-	 * compiler barriers to force a re-read.
+	 * Now check if we missed an update -- use barrier() to force a
+	 * re-read.
 	 */
+	barrier();
 	if (unlikely(head != local_read(&rb->head))) {
-		local_inc(&rb->nest);
+		rb->nest++;
+		barrier();
 		goto again;
 	}
 
--
2.21.0.1020.gf2820cf01a-goog