[PATCH v2] rust: page: add byte-wise atomic memory copy methods
From: Andreas Hindborg
Date: Thu Feb 12 2026 - 09:54:35 EST
When copying data from buffers that are mapped to user space, it is
impossible to guarantee the absence of concurrent memory operations on those
buffers. Copying data between a `Page` and such buffers with plain memory
operations would therefore be undefined behavior unless special
considerations are made.
Add methods on `Page` to read and write the contents using byte-wise atomic
operations.
Also improve clarity by specifying additional requirements on the
`read_raw`/`write_raw` methods regarding concurrent operations on the
involved buffers.
Signed-off-by: Andreas Hindborg <a.hindborg@xxxxxxxxxx>
---
Changes in v2:
- Rewrite patch with byte-wise atomic operations as foundation of operation.
- Update subject and commit message.
- Link to v1: https://lore.kernel.org/r/20260130-page-volatile-io-v1-1-19f3d3e8f265@xxxxxxxxxx
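
A rough usage sketch for reviewers (not part of this patch; the function and
parameter names below are made up): a driver holding a `Page` and a raw
pointer into a buffer shared with user space could copy data into the page
as follows, provided all concurrent accesses to the shared buffer are at
most byte-wise atomic:

    // Hypothetical example, not part of this patch.
    use kernel::page::Page;
    use kernel::prelude::*;

    /// Copies `len` bytes from a buffer that may be written concurrently
    /// (e.g. because it is mapped to user space) into `page` at offset 0.
    ///
    /// # Safety
    ///
    /// - `buf` must be valid for reads of `len` bytes for the duration of the call.
    /// - All concurrent accesses to `buf` must be byte-wise atomic, so that the
    ///   copy does not cause a data race as defined by the LKMM.
    unsafe fn fill_page_from_shared_buf(page: &Page, buf: *const u8, len: usize) -> Result {
        // SAFETY: Upheld by the safety requirements of this function.
        unsafe { page.write_bytewise_atomic(buf, 0, len) }
    }
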
---
rust/kernel/page.rs | 65 ++++++++++++++++++++++++++++++++++++++++++++++
rust/kernel/sync/atomic.rs | 32 +++++++++++++++++++++++
2 files changed, 97 insertions(+)
diff --git a/rust/kernel/page.rs b/rust/kernel/page.rs
index 432fc0297d4a8..febe9621adee6 100644
--- a/rust/kernel/page.rs
+++ b/rust/kernel/page.rs
@@ -7,6 +7,7 @@
bindings,
error::code::*,
error::Result,
+ sync::atomic::atomic_per_byte_memcpy,
uaccess::UserSliceReader,
};
use core::{
@@ -260,6 +261,8 @@ fn with_pointer_into_page<T>(
/// # Safety
///
/// * Callers must ensure that `dst` is valid for writing `len` bytes.
+ /// * Callers must ensure that there are no other concurrent reads or writes to/from the
+ /// destination memory region.
/// * Callers must ensure that this call does not race with a write to the same page that
/// overlaps with this read.
pub unsafe fn read_raw(&self, dst: *mut u8, offset: usize, len: usize) -> Result {
@@ -274,6 +277,34 @@ pub unsafe fn read_raw(&self, dst: *mut u8, offset: usize, len: usize) -> Result
})
}
+ /// Maps the page and reads from it into the given buffer using byte-wise atomic memory
+ /// operations.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// # Safety
+ /// Callers must ensure that:
+ ///
+ /// - `dst` is valid for writes for `len` bytes for the duration of the call.
+ /// - For the duration of the call, other accesses to the area described by `dst` and `len`
+ /// must not cause data races (defined by [`LKMM`]) against atomic operations executed by this
+ /// function. Note that if all other accesses are atomic, then this safety requirement is
+ /// trivially fulfilled.
+ ///
+ /// [`LKMM`]: srctree/tools/memory-model
+ pub unsafe fn read_bytewise_atomic(&self, dst: *mut u8, offset: usize, len: usize) -> Result {
+ self.with_pointer_into_page(offset, len, move |src| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then
+ // it has performed a bounds check and guarantees that `src` is
+ // valid for `len` bytes.
+ //
+ // The caller guarantees that the destination is valid for writes and free of data races.
+ unsafe { atomic_per_byte_memcpy(src, dst, len) };
+ Ok(())
+ })
+ }
+
/// Maps the page and writes into it from the given buffer.
///
/// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
@@ -282,6 +313,7 @@ pub unsafe fn read_raw(&self, dst: *mut u8, offset: usize, len: usize) -> Result
/// # Safety
///
/// * Callers must ensure that `src` is valid for reading `len` bytes.
+ /// * Callers must ensure that there are no concurrent writes to the source memory region.
/// * Callers must ensure that this call does not race with a read or write to the same page
/// that overlaps with this write.
pub unsafe fn write_raw(&self, src: *const u8, offset: usize, len: usize) -> Result {
@@ -295,6 +327,39 @@ pub unsafe fn write_raw(&self, src: *const u8, offset: usize, len: usize) -> Res
})
}
+ /// Maps the page and writes into it from the given buffer using byte-wise atomic memory
+ /// operations.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that:
+ ///
+ /// - `src` is valid for reads for `len` bytes for the duration of the call.
+ /// - For the duration of the call, other accesses to the area described by `src` and `len`
+ /// must not cause data races (defined by [`LKMM`]) against atomic operations executed by this
+ /// function. Note that if all other accesses are atomic, then this safety requirement is
+ /// trivially fulfilled.
+ ///
+ /// [`LKMM`]: srctree/tools/memory-model
+ pub unsafe fn write_bytewise_atomic(
+ &self,
+ src: *const u8,
+ offset: usize,
+ len: usize,
+ ) -> Result {
+ self.with_pointer_into_page(offset, len, move |dst| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then it has performed a
+ // bounds check and guarantees that `dst` is valid for `len` bytes.
+ //
+ // The caller guarantees that the source is valid for reads and free of data races.
+ unsafe { atomic_per_byte_memcpy(src, dst, len) };
+ Ok(())
+ })
+ }
+
/// Maps the page and zeroes the given slice.
///
/// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
diff --git a/rust/kernel/sync/atomic.rs b/rust/kernel/sync/atomic.rs
index 4aebeacb961a2..8ab20126a88cf 100644
--- a/rust/kernel/sync/atomic.rs
+++ b/rust/kernel/sync/atomic.rs
@@ -560,3 +560,35 @@ pub fn fetch_add<Rhs, Ordering: ordering::Ordering>(&self, v: Rhs, _: Ordering)
unsafe { from_repr(ret) }
}
}
+
+/// Copy `len` bytes from `src` to `dst` using byte-wise atomic operations.
+///
+/// This copy operation is volatile.
+///
+/// # Safety
+///
+/// Callers must ensure that:
+///
+/// - `src` is valid for reads for `len` bytes for the duration of the call.
+/// - `dst` is valid for writes for `len` bytes for the duration of the call.
+/// - For the duration of the call, other accesses to the areas described by `src`, `dst` and `len`
+/// must not cause data races (defined by [`LKMM`]) against atomic operations executed by this
+/// function. Note that if all other accesses are atomic, then this safety requirement is
+/// trivially fulfilled.
+///
+/// [`LKMM`]: srctree/tools/memory-model
+pub unsafe fn atomic_per_byte_memcpy(src: *const u8, dst: *mut u8, len: usize) {
+ // SAFETY: By the safety requirements of this function, the following operation will not:
+ // - Trap.
+ // - Invalidate any reference invariants.
+ // - Race with any operation by the Rust AM, as `bindings::memcpy` is a byte-wise atomic
+ // operation and all operations by the Rust AM to the involved memory areas use byte-wise
+ // atomic semantics.
+ unsafe {
+ bindings::memcpy(
+ dst.cast::<kernel::ffi::c_void>(),
+ src.cast::<kernel::ffi::c_void>(),
+ len,
+ )
+ };
+}
---
base-commit: 63804fed149a6750ffd28610c5c1c98cce6bd377
change-id: 20260130-page-volatile-io-05ff595507d3
Best regards,
--
Andreas Hindborg <a.hindborg@xxxxxxxxxx>