[PATCH 08/13] rust: sync: atomic: Add performance-optimal Flag type for atomic booleans

From: Boqun Feng

Date: Tue Mar 03 2026 - 15:17:46 EST


From: FUJITA Tomonori <fujita.tomonori@xxxxxxxxx>

Add an AtomicFlag type for boolean flags.

Document when AtomicFlag is generally preferable to Atomic<bool>: in
particular, when RMW operations such as xchg()/cmpxchg() may be used
and minimizing memory usage is not the top priority. On some
architectures without byte-sized RMW instructions, Atomic<bool> can be
slower for RMW operations.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@xxxxxxxxx>
Reviewed-by: Gary Guo <gary@xxxxxxxxxxx>
Signed-off-by: Boqun Feng <boqun@xxxxxxxxxx>
Link: https://patch.msgid.link/20260129122622.3896144-2-tomo@xxxxxxxxxxxx
---
rust/kernel/sync/atomic.rs | 125 +++++++++++++++++++++++++++
rust/kernel/sync/atomic/predefine.rs | 17 ++++
2 files changed, 142 insertions(+)

diff --git a/rust/kernel/sync/atomic.rs b/rust/kernel/sync/atomic.rs
index f4c3ab15c8a7..f80cebce5bc1 100644
--- a/rust/kernel/sync/atomic.rs
+++ b/rust/kernel/sync/atomic.rs
@@ -578,3 +578,128 @@ pub fn fetch_add<Rhs, Ordering: ordering::Ordering>(&self, v: Rhs, _: Ordering)
unsafe { from_repr(ret) }
}
}
+
+#[cfg(any(CONFIG_X86_64, CONFIG_UML, CONFIG_ARM, CONFIG_ARM64))]
+#[repr(C)]
+#[derive(Clone, Copy)]
+struct Flag {
+ bool_field: bool,
+}
+
+/// # Invariants
+///
+/// `padding` must be all zeroes.
+#[cfg(not(any(CONFIG_X86_64, CONFIG_UML, CONFIG_ARM, CONFIG_ARM64)))]
+#[repr(C, align(4))]
+#[derive(Clone, Copy)]
+struct Flag {
+ #[cfg(target_endian = "big")]
+ padding: [u8; 3],
+ bool_field: bool,
+ #[cfg(target_endian = "little")]
+ padding: [u8; 3],
+}
+
+impl Flag {
+ #[inline(always)]
+ const fn new(b: bool) -> Self {
+ // INVARIANT: `padding` is all zeroes.
+ Self {
+ bool_field: b,
+ #[cfg(not(any(CONFIG_X86_64, CONFIG_UML, CONFIG_ARM, CONFIG_ARM64)))]
+ padding: [0; 3],
+ }
+ }
+}
+
+// SAFETY: `Flag` and `Repr` have the same size and alignment, and `Flag` is round-trip
+// transmutable to the selected representation (`i8` or `i32`).
+unsafe impl AtomicType for Flag {
+ #[cfg(any(CONFIG_X86_64, CONFIG_UML, CONFIG_ARM, CONFIG_ARM64))]
+ type Repr = i8;
+ #[cfg(not(any(CONFIG_X86_64, CONFIG_UML, CONFIG_ARM, CONFIG_ARM64)))]
+ type Repr = i32;
+}
+
+/// An atomic flag type intended to be backed by a performance-optimal integer type.
+///
+/// The backing integer type is an implementation detail; it may vary by architecture and change
+/// in the future.
+///
+/// [`AtomicFlag`] is generally preferable to [`Atomic<bool>`] when you need read-modify-write
+/// (RMW) operations (e.g. [`Atomic::xchg()`]/[`Atomic::cmpxchg()`]) or when [`Atomic<bool>`] does
+/// not save memory due to padding. On some architectures that do not support byte-sized atomic
+/// RMW operations, RMW operations on [`Atomic<bool>`] are slower.
+///
+/// If you only use [`Atomic::load()`]/[`Atomic::store()`], [`Atomic<bool>`] is fine.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::sync::atomic::{AtomicFlag, Relaxed};
+///
+/// let flag = AtomicFlag::new(false);
+/// assert_eq!(false, flag.load(Relaxed));
+/// flag.store(true, Relaxed);
+/// assert_eq!(true, flag.load(Relaxed));
+/// ```
+pub struct AtomicFlag(Atomic<Flag>);
+
+impl AtomicFlag {
+ /// Creates a new atomic flag.
+ #[inline(always)]
+ pub const fn new(b: bool) -> Self {
+ Self(Atomic::new(Flag::new(b)))
+ }
+
+ /// Returns a mutable reference to the underlying flag as a [`bool`].
+ ///
+ /// This is safe because the mutable reference to the atomic flag guarantees exclusive access.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::sync::atomic::{AtomicFlag, Relaxed};
+ ///
+ /// let mut atomic_flag = AtomicFlag::new(false);
+ /// assert_eq!(false, atomic_flag.load(Relaxed));
+ /// *atomic_flag.get_mut() = true;
+ /// assert_eq!(true, atomic_flag.load(Relaxed));
+ /// ```
+ #[inline(always)]
+ pub fn get_mut(&mut self) -> &mut bool {
+ &mut self.0.get_mut().bool_field
+ }
+
+ /// Loads the value from the atomic flag.
+ #[inline(always)]
+ pub fn load<Ordering: ordering::AcquireOrRelaxed>(&self, o: Ordering) -> bool {
+ self.0.load(o).bool_field
+ }
+
+ /// Stores a value to the atomic flag.
+ #[inline(always)]
+ pub fn store<Ordering: ordering::ReleaseOrRelaxed>(&self, v: bool, o: Ordering) {
+ self.0.store(Flag::new(v), o);
+ }
+
+ /// Stores a value to the atomic flag and returns the previous value.
+ #[inline(always)]
+ pub fn xchg<Ordering: ordering::Ordering>(&self, new: bool, o: Ordering) -> bool {
+ self.0.xchg(Flag::new(new), o).bool_field
+ }
+
+ /// Stores a value to the atomic flag if the current value is equal to `old`.
+ #[inline(always)]
+ pub fn cmpxchg<Ordering: ordering::Ordering>(
+ &self,
+ old: bool,
+ new: bool,
+ o: Ordering,
+ ) -> Result<bool, bool> {
+ match self.0.cmpxchg(Flag::new(old), Flag::new(new), o) {
+ Ok(_) => Ok(old),
+ Err(f) => Err(f.bool_field),
+ }
+ }
+}
diff --git a/rust/kernel/sync/atomic/predefine.rs b/rust/kernel/sync/atomic/predefine.rs
index 6f2c60529b64..ceb3caed9784 100644
--- a/rust/kernel/sync/atomic/predefine.rs
+++ b/rust/kernel/sync/atomic/predefine.rs
@@ -272,4 +272,21 @@ fn atomic_ptr_tests() {
);
assert_eq!(x.load(Relaxed), &raw const u);
}
+
+ #[test]
+ fn atomic_flag_tests() {
+ let mut flag = AtomicFlag::new(false);
+
+ assert_eq!(false, flag.load(Relaxed));
+
+ *flag.get_mut() = true;
+ assert_eq!(true, flag.load(Relaxed));
+
+ assert_eq!(true, flag.xchg(false, Relaxed));
+ assert_eq!(false, flag.load(Relaxed));
+
+ *flag.get_mut() = true;
+ assert_eq!(Ok(true), flag.cmpxchg(true, false, Full));
+ assert_eq!(false, flag.load(Relaxed));
+ }
}
--
2.50.1 (Apple Git-155)