[WIP 1/3] rust: Introduce atomic module

From: Boqun Feng
Date: Fri Mar 22 2024 - 19:39:48 EST


Although Rust has its own memory ordering model (the standard C++
memory model), maintaining two models in the kernel is unwise to start
with: it increases the difficulty of reasoning about correctness. Since
we already use the Linux Kernel Memory Model (LKMM) for C code in the
kernel, it makes sense for Rust code to use LKMM as well; therefore,
introduce a module that provides LKMM atomic primitives.

Signed-off-by: Boqun Feng <boqun.feng@xxxxxxxxx>
---
rust/kernel/sync.rs | 1 +
rust/kernel/sync/atomic.rs | 42 ++++++++++++++++++++++++++++
rust/kernel/sync/atomic/arch.rs | 9 ++++++
rust/kernel/sync/atomic/arch/x86.rs | 43 +++++++++++++++++++++++++++++
4 files changed, 95 insertions(+)
create mode 100644 rust/kernel/sync/atomic.rs
create mode 100644 rust/kernel/sync/atomic/arch.rs
create mode 100644 rust/kernel/sync/atomic/arch/x86.rs

diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
index c983f63fd56e..dc2d26712f26 100644
--- a/rust/kernel/sync.rs
+++ b/rust/kernel/sync.rs
@@ -8,6 +8,7 @@
use crate::types::Opaque;

mod arc;
+pub mod atomic;
mod condvar;
pub mod lock;
mod locked_by;
diff --git a/rust/kernel/sync/atomic.rs b/rust/kernel/sync/atomic.rs
new file mode 100644
index 000000000000..280040705fb0
--- /dev/null
+++ b/rust/kernel/sync/atomic.rs
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Atomic and barrier primitives.
+//!
+//! These primitives should have the same semantics as their C counterparts; for precise definitions
+//! of the semantics, please refer to tools/memory-model. Note that the Linux Kernel Memory
+//! (Consistency) Model is the only memory model used for Rust development in the kernel right
+//! now; please avoid using Rust's own atomics.
+
+use core::cell::UnsafeCell;
+
+mod arch;
+
+/// An atomic `i32`, operated on via the arch-provided LKMM-style primitives (see module docs).
+pub struct AtomicI32(pub(crate) UnsafeCell<i32>);
+
+impl AtomicI32 {
+    /// Creates a new atomic value initialized to `v`.
+    pub fn new(v: i32) -> Self {
+        Self(UnsafeCell::new(v))
+    }
+
+    /// Atomically adds `i` to the variable with RELAXED ordering, wrapping on overflow.
+    ///
+    /// Returns the value of the atomic before the addition.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use kernel::sync::atomic::AtomicI32;
+    ///
+    /// let a = AtomicI32::new(0);
+    /// let b = a.fetch_add_relaxed(1);
+    /// let c = a.fetch_add_relaxed(2);
+    ///
+    /// assert_eq!(b, 0);
+    /// assert_eq!(c, 1);
+    /// ```
+    pub fn fetch_add_relaxed(&self, i: i32) -> i32 {
+        arch::i32_fetch_add_relaxed(&self.0, i)
+    }
+}
diff --git a/rust/kernel/sync/atomic/arch.rs b/rust/kernel/sync/atomic/arch.rs
new file mode 100644
index 000000000000..3eb5a103a69a
--- /dev/null
+++ b/rust/kernel/sync/atomic/arch.rs
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Architectural atomic and barrier primitives.
+
+#[cfg(CONFIG_X86)]
+pub(crate) use x86::*;
+
+#[cfg(CONFIG_X86)]
+pub(crate) mod x86;
diff --git a/rust/kernel/sync/atomic/arch/x86.rs b/rust/kernel/sync/atomic/arch/x86.rs
new file mode 100644
index 000000000000..2d715f740b22
--- /dev/null
+++ b/rust/kernel/sync/atomic/arch/x86.rs
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! x86 implementation for atomic and barrier primitives.
+
+use core::arch::asm;
+use core::cell::UnsafeCell;
+
+/// Generates an instruction with the "lock" prefix (required for atomicity on SMP kernels).
+#[cfg(CONFIG_SMP)]
+macro_rules! lock_instr {
+    ($i:literal) => { concat!("lock; ", $i) }
+}
+
+#[cfg(not(CONFIG_SMP))]
+macro_rules! lock_instr {
+    ($i:literal) => { $i }
+}
+
+/// Atomically exchanges and adds `i` to `*v` in a wrapping way.
+///
+/// Returns the old value before the addition.
+///
+/// # Safety
+///
+/// The caller needs to make sure `v` points to a valid `i32`.
+unsafe fn i32_xadd(v: *mut i32, mut i: i32) -> i32 {
+    // SAFETY: Per the function safety requirement, the address `v` is valid for "xadd".
+    unsafe {
+        asm!(
+            lock_instr!("xaddl {i:e}, ({v})"),
+            i = inout(reg) i,
+            v = in(reg) v,
+            // No `preserves_flags`: "xadd" modifies the arithmetic flags (OF, SF, ZF, AF, CF, PF).
+            options(att_syntax),
+        );
+    }
+
+    i
+}
+
+pub(crate) fn i32_fetch_add_relaxed(v: &UnsafeCell<i32>, i: i32) -> i32 {
+    // SAFETY: `v.get()` points to a valid `i32` for the duration of the call.
+    unsafe { i32_xadd(v.get(), i) }
+}
--
2.44.0