Re: [PATCH v2] rust: page: add byte-wise atomic memory copy methods
From: Peter Zijlstra
Date: Tue Feb 17 2026 - 08:01:36 EST
On Tue, Feb 17, 2026 at 01:09:20PM +0100, Peter Zijlstra wrote:
> On Tue, Feb 17, 2026 at 11:51:20AM +0000, Alice Ryhl wrote:
>
> > In my experience with dealing with `struct page` that is mapped into a
> > vma, you need memcpy because the struct might be split across two
> > different pages in the vma. The pages are adjacent in userspace's
> > address space, but not necessarily adjacent from the kernel's POV.
> >
> > So you might end up with something that looks like this:
> >
> > struct foo val;
> > void *ptr1 = kmap_local_page(p1);
> > void *ptr2 = kmap_local_page(p2);
> > memcpy(&val, ptr1 + offset, PAGE_SIZE - offset);
> > memcpy((void *)&val + (PAGE_SIZE - offset), ptr2, sizeof(struct foo) - (PAGE_SIZE - offset));
> > kunmap_local(ptr2);
> > kunmap_local(ptr1);
>
> barrier();
>
> > if (is_valid(&val)) {
> > // use val
> > }
> >
> > This exact thing happens in Binder. It has to be a memcpy.
>
> Sure, but then stick that one barrier() in and you're good.
Anyway, I don't think something like the below is an unreasonable patch.
It ensures all accesses to the ptr obtained from kmap_local_*() and
released by kunmap_local() stay between those two calls.
---
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index 0574c21ca45d..2fe71b715a46 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -185,31 +185,42 @@ static inline void kunmap(const struct page *page)
static inline void *kmap_local_page(const struct page *page)
{
- return page_address(page);
+ void *addr = page_address(page);
+ barrier();
+ return addr;
}
static inline void *kmap_local_page_try_from_panic(const struct page *page)
{
- return page_address(page);
+ void *addr = page_address(page);
+ barrier();
+ return addr;
}
static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
{
- return folio_address(folio) + offset;
+ void *addr = folio_address(folio) + offset;
+ barrier();
+ return addr;
}
static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
{
- return kmap_local_page(page);
+ void *addr = kmap_local_page(page);
+ barrier();
+ return addr;
}
static inline void *kmap_local_pfn(unsigned long pfn)
{
- return kmap_local_page(pfn_to_page(pfn));
+ void *addr = kmap_local_page(pfn_to_page(pfn));
+ barrier();
+ return addr;
}
static inline void __kunmap_local(const void *addr)
{
+ barrier();
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif