[PATCH v2] riscv: Limit uaccess speculation using guard page

From: Lukas Gerlach

Date: Thu Feb 26 2026 - 10:14:08 EST


User pointers passed to uaccess routines can be speculatively used
before access_ok() validates them, potentially leaking kernel memory.

Clamp any address >= TASK_SIZE to the guard page at TASK_SIZE-1, which
will always fault. The clamp is branchless to prevent speculative bypass.

Unlike the v1 approach of clearing the sign bit, this works with all
paging modes (Sv39/Sv48/Sv57) and does not interfere with the pointer
masking extension (Smnpm).

Similar to commit 4d8efc2d5ee4 ("arm64: Use pointer masking to limit
uaccess speculation").

Signed-off-by: Lukas Gerlach <lukas.gerlach@xxxxxxxx>
---
arch/riscv/include/asm/uaccess.h | 38 +++++++++++++++++++++++++++++---------
1 file changed, 29 insertions(+), 9 deletions(-)

diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 11c9886c3b70..df31df3bd55c 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -74,6 +74,20 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigne
#define __typefits(x, type, not) \
__builtin_choose_expr(sizeof(x) <= sizeof(type), (unsigned type)0, not)

+/*
+ * Sanitize a uaccess pointer such that it cannot reach any kernel address.
+ * Branchlessly clamp any address >= TASK_SIZE to the unmapped guard page
+ * at TASK_SIZE-1, which will always fault on access.
+ */
+#define uaccess_mask_ptr(ptr) ((__typeof__(ptr))__uaccess_mask_ptr(ptr))
+static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
+{
+ unsigned long p = (unsigned long)ptr;
+ /* All-ones when p > TASK_SIZE - 1, else zero; BITS_PER_LONG - 1 (not 63) so rv32 is safe */
+ unsigned long mask = (unsigned long)((long)(TASK_SIZE - 1 - p) >> (BITS_PER_LONG - 1));
+ return (void __user *)((p & ~mask) | ((TASK_SIZE - 1) & mask));
+}
+
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
@@ -245,7 +259,8 @@ __gu_failed: \
*/
#define __get_user(x, ptr) \
({ \
- const __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
+ const __typeof__(*(ptr)) __user *__gu_ptr = \
+ uaccess_mask_ptr(untagged_addr(ptr)); \
long __gu_err = 0; \
__typeof__(x) __gu_val; \
\
@@ -376,7 +391,8 @@ err_label: \
*/
#define __put_user(x, ptr) \
({ \
- __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
+ __typeof__(*(ptr)) __user *__gu_ptr = \
+ uaccess_mask_ptr(untagged_addr(ptr)); \
__typeof__(*__gu_ptr) __val = (x); \
long __pu_err = 0; \
\
@@ -423,13 +439,15 @@ unsigned long __must_check __asm_copy_from_user(void *to,
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- return __asm_copy_from_user(to, untagged_addr(from), n);
+ return __asm_copy_from_user(to,
+ uaccess_mask_ptr(untagged_addr(from)), n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- return __asm_copy_to_user(untagged_addr(to), from, n);
+ return __asm_copy_to_user(
+ uaccess_mask_ptr(untagged_addr(to)), from, n);
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);
@@ -444,7 +462,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
might_fault();
return access_ok(to, n) ?
- __clear_user(untagged_addr(to), n) : n;
+ __clear_user(uaccess_mask_ptr(untagged_addr(to)), n) : n;
}

#define arch_get_kernel_nofault(dst, src, type, err_label) \
@@ -471,20 +489,22 @@ static inline void user_access_restore(unsigned long enabled) { }
* the error labels - thus the macro games.
*/
#define arch_unsafe_put_user(x, ptr, label) \
- __put_user_nocheck(x, (ptr), label)
+ __put_user_nocheck(x, uaccess_mask_ptr(ptr), label)

#define arch_unsafe_get_user(x, ptr, label) do { \
__inttype(*(ptr)) __gu_val; \
- __get_user_nocheck(__gu_val, (ptr), label); \
+ __get_user_nocheck(__gu_val, uaccess_mask_ptr(ptr), label); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)

#define unsafe_copy_to_user(_dst, _src, _len, label) \
- if (__asm_copy_to_user_sum_enabled(_dst, _src, _len)) \
+ if (__asm_copy_to_user_sum_enabled( \
+ uaccess_mask_ptr(_dst), _src, _len)) \
goto label;

#define unsafe_copy_from_user(_dst, _src, _len, label) \
- if (__asm_copy_from_user_sum_enabled(_dst, _src, _len)) \
+ if (__asm_copy_from_user_sum_enabled( \
+ _dst, uaccess_mask_ptr(_src), _len)) \
goto label;

#else /* CONFIG_MMU */

---
base-commit: f4d0ec0aa20d49f09dc01d82894ce80d72de0560
change-id: 20260226-uaccess-guard-v2-7a3358bee742

Best regards,
--
Lukas Gerlach <lukas.gerlach@xxxxxxxx>