[PATCH v5 1/3] ring-buffer: Flush and stop persistent ring buffer on panic

From: Masami Hiramatsu (Google)

Date: Thu Feb 26 2026 - 08:39:07 EST


From: Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>

On real hardware, panic and machine reboot may not flush hardware cache
to memory. This means the persistent ring buffer, which relies on a
coherent state of memory, may not have its events written to the buffer
and they may be lost. Moreover, the counters used to validate the
integrity of the persistent ring buffer may become inconsistent,
which may cause all of its data to be discarded.

To avoid this issue, stop recording of the ring buffer on panic and
flush the cache of the ring buffer's memory.

Fixes: e645535a954a ("tracing: Add option to use memmapped memory for trace boot instance")
Cc: stable@xxxxxxxxxxxxxxx
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>
---
Changes in v5:
- Use ring_buffer_record_off() instead of ring_buffer_record_disable().
- Use flush_cache_all() to ensure flush all cache.
Changes in v3:
- update patch description.
---
kernel/trace/ring_buffer.c | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f16f053ef77d..0eb6e6595f37 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -6,6 +6,7 @@
*/
#include <linux/sched/isolation.h>
#include <linux/trace_recursion.h>
+#include <linux/panic_notifier.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
@@ -589,6 +590,7 @@ struct trace_buffer {

unsigned long range_addr_start;
unsigned long range_addr_end;
+ struct notifier_block flush_nb;

struct ring_buffer_meta *meta;

@@ -2471,6 +2473,15 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
kfree(cpu_buffer);
}

+static int rb_flush_buffer_cb(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct trace_buffer *buffer = container_of(nb, struct trace_buffer, flush_nb);
+
+ ring_buffer_record_off(buffer); /* permanently stop writers; keeps meta counters consistent */
+ flush_cache_all(); /* push buffer contents out of CPU caches before reboot */
+ return NOTIFY_DONE;
+}
+
static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
int order, unsigned long start,
unsigned long end,
@@ -2590,6 +2601,12 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,

mutex_init(&buffer->mutex);

+ /* Persistent ring buffer needs to flush cache before reboot. */
+ if (start && end) {
+ buffer->flush_nb.notifier_call = rb_flush_buffer_cb;
+ atomic_notifier_chain_register(&panic_notifier_list, &buffer->flush_nb);
+ }
+
return_ptr(buffer);

fail_free_buffers:
@@ -2677,6 +2694,9 @@ ring_buffer_free(struct trace_buffer *buffer)
{
int cpu;

+ if (buffer->range_addr_start && buffer->range_addr_end)
+ atomic_notifier_chain_unregister(&panic_notifier_list, &buffer->flush_nb);
+
cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);

irq_work_sync(&buffer->irq_work.work);