[PATCH 2/3] rust: sync: generic memory barriers
From: Gary Guo
Date: Thu Apr 02 2026 - 11:29:46 EST
From: Gary Guo <gary@xxxxxxxxxxx>
Implement a generic interface for memory barriers (full system/DMA/SMP).
The interface uses a parameter to force the user to specify their intent with
barriers.
It provides `Read`, `Write`, `Full` orderings which map to the existing
`rmb()`, `wmb()` and `mb()`, but also `Acquire` and `Release` which are
documented to have `LOAD->{LOAD,STORE}` ordering and `{LOAD,STORE}->STORE`
ordering, although for now they're still mapped to a full `mb()`. But in
the future they could be mapped to a more efficient form depending on the
architecture. I included them as many users do not need the STORE->LOAD
ordering, and having them use `Acquire`/`Release` makes their intent clearer
as to which reorderings are to be prevented.
A generic parameter is used here instead of providing individual standalone
functions to reduce code duplication. For example, the `Acquire` -> `Full`
upgrade here is uniformly implemented for all three types. The `CONFIG_SMP`
check in `smp_mb` is uniformly implemented for all SMP barriers. This could
extend to the `virt_mb` variants if they're introduced in the future.
Signed-off-by: Gary Guo <gary@xxxxxxxxxxx>
---
rust/kernel/sync/atomic/ordering.rs | 2 +-
rust/kernel/sync/barrier.rs | 194 ++++++++++++++++++++++++----
2 files changed, 168 insertions(+), 28 deletions(-)
diff --git a/rust/kernel/sync/atomic/ordering.rs b/rust/kernel/sync/atomic/ordering.rs
index 3f103aa8db99..c4e732e7212f 100644
--- a/rust/kernel/sync/atomic/ordering.rs
+++ b/rust/kernel/sync/atomic/ordering.rs
@@ -15,7 +15,7 @@
//! - It provides ordering between the annotated operation and all the following memory accesses.
//! - It provides ordering between all the preceding memory accesses and all the following memory
//! accesses.
-//! - All the orderings are the same strength as a full memory barrier (i.e. `smp_mb()`).
+//! - All the orderings are the same strength as a full memory barrier (i.e. `smp_mb(Full)`).
//! - [`Relaxed`] provides no ordering except the dependency orderings. Dependency orderings are
//! described in "DEPENDENCY RELATIONS" in [`LKMM`]'s [`explanation`].
//!
diff --git a/rust/kernel/sync/barrier.rs b/rust/kernel/sync/barrier.rs
index 8f2d435fcd94..0331bb353a76 100644
--- a/rust/kernel/sync/barrier.rs
+++ b/rust/kernel/sync/barrier.rs
@@ -7,6 +7,23 @@
//!
//! [`LKMM`]: srctree/tools/memory-model/
+#![expect(private_bounds, reason = "sealed implementation")]
+
+pub use super::atomic::ordering::{
+ Acquire,
+ Full,
+ Release, //
+};
+
+/// The annotation type for read operations.
+pub struct Read;
+
+/// The annotation type for write operations.
+pub struct Write;
+
+struct Smp;
+struct Dma;
+
/// A compiler barrier.
///
/// A barrier that prevents compiler from reordering memory accesses across the barrier.
@@ -19,43 +36,166 @@ pub(crate) fn barrier() {
unsafe { core::arch::asm!("") };
}
-/// A full memory barrier.
-///
-/// A barrier that prevents compiler and CPU from reordering memory accesses across the barrier.
-#[inline(always)]
-pub fn smp_mb() {
- if cfg!(CONFIG_SMP) {
- // SAFETY: `smp_mb()` is safe to call.
- unsafe { bindings::smp_mb() };
- } else {
- barrier();
+trait MemoryBarrier<Flavour = ()> {
+ fn run();
+}
+
+// Currently the kernel only supports `rmb`, `wmb` and full `mb`.
+// Upgrade `Acquire`/`Release` barriers to full barriers.
+
+impl<F> MemoryBarrier<F> for Acquire
+where
+ Full: MemoryBarrier<F>,
+{
+ #[inline]
+ fn run() {
+ Full::run();
}
}
-/// A write-write memory barrier.
-///
-/// A barrier that prevents compiler and CPU from reordering memory write accesses across the
-/// barrier.
-#[inline(always)]
-pub fn smp_wmb() {
- if cfg!(CONFIG_SMP) {
+impl<F> MemoryBarrier<F> for Release
+where
+ Full: MemoryBarrier<F>,
+{
+ #[inline]
+ fn run() {
+ Full::run();
+ }
+}
+
+// Specific barrier implementations.
+
+impl MemoryBarrier for Read {
+ #[inline]
+ fn run() {
+ // SAFETY: `rmb()` is safe to call.
+ unsafe { bindings::rmb() };
+ }
+}
+
+impl MemoryBarrier for Write {
+ #[inline]
+ fn run() {
+ // SAFETY: `wmb()` is safe to call.
+ unsafe { bindings::wmb() };
+ }
+}
+
+impl MemoryBarrier for Full {
+ #[inline]
+ fn run() {
+ // SAFETY: `mb()` is safe to call.
+ unsafe { bindings::mb() };
+ }
+}
+
+impl MemoryBarrier<Dma> for Read {
+ #[inline]
+ fn run() {
+ // SAFETY: `dma_rmb()` is safe to call.
+ unsafe { bindings::dma_rmb() };
+ }
+}
+
+impl MemoryBarrier<Dma> for Write {
+ #[inline]
+ fn run() {
+ // SAFETY: `dma_wmb()` is safe to call.
+ unsafe { bindings::dma_wmb() };
+ }
+}
+
+impl MemoryBarrier<Dma> for Full {
+ #[inline]
+ fn run() {
+ // SAFETY: `dma_mb()` is safe to call.
+ unsafe { bindings::dma_mb() };
+ }
+}
+
+impl MemoryBarrier<Smp> for Read {
+ #[inline]
+ fn run() {
+ // SAFETY: `smp_rmb()` is safe to call.
+ unsafe { bindings::smp_rmb() };
+ }
+}
+
+impl MemoryBarrier<Smp> for Write {
+ #[inline]
+ fn run() {
// SAFETY: `smp_wmb()` is safe to call.
unsafe { bindings::smp_wmb() };
- } else {
- barrier();
}
}
-/// A read-read memory barrier.
+impl MemoryBarrier<Smp> for Full {
+ #[inline]
+ fn run() {
+ // SAFETY: `smp_mb()` is safe to call.
+ unsafe { bindings::smp_mb() };
+ }
+}
+
+/// Memory barrier.
///
-/// A barrier that prevents compiler and CPU from reordering memory read accesses across the
-/// barrier.
-#[inline(always)]
-pub fn smp_rmb() {
+/// A barrier that prevents compiler and CPU from reordering memory accesses across the barrier.
+///
+/// The specific forms of reordering can be specified using the parameter.
+/// - `mb(Read)` provides a read-read barrier.
+/// - `mb(Write)` provides a write-write barrier.
+/// - `mb(Full)` provides a full barrier.
+/// - `mb(Acquire)` prevents preceding reads from being ordered against succeeding memory
+/// operations.
+/// - `mb(Release)` prevents preceding memory operations from being ordered against succeeding
+/// writes.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::sync::barrier::*;
+/// mb(Read);
+/// mb(Write);
+/// mb(Acquire);
+/// mb(Release);
+/// mb(Full);
+/// ```
+#[inline]
+#[doc(alias = "rmb")]
+#[doc(alias = "wmb")]
+pub fn mb<T: MemoryBarrier>(_: T) {
+ T::run()
+}
+
+/// Memory barrier between CPUs.
+///
+/// A barrier that prevents compiler and CPU from reordering memory accesses across the barrier.
+/// Does not prevent re-ordering with respect to other bus-mastering devices.
+///
+/// Prefer using `Acquire` [loads](super::atomic::Atomic::load) to `Acquire` barriers, and `Release`
+/// [stores](super::atomic::Atomic::store) to `Release` barriers.
+///
+/// See [`mb`] for usage.
+#[inline]
+#[doc(alias = "smp_rmb")]
+#[doc(alias = "smp_wmb")]
+pub fn smp_mb<T: MemoryBarrier<Smp>>(_: T) {
if cfg!(CONFIG_SMP) {
- // SAFETY: `smp_rmb()` is safe to call.
- unsafe { bindings::smp_rmb() };
+ T::run()
} else {
- barrier();
+ barrier()
}
}
+
+/// Memory barrier between local CPU and bus-mastering devices.
+///
+/// A barrier that prevents compiler and CPU from reordering memory accesses across the barrier.
+/// Does not prevent re-ordering with respect to other CPUs.
+///
+/// See [`mb`] for usage.
+#[inline]
+#[doc(alias = "dma_rmb")]
+#[doc(alias = "dma_wmb")]
+pub fn dma_mb<T: MemoryBarrier<Dma>>(_: T) {
+ T::run()
+}
--
2.51.2