Re: [PATCH] ARC: uaccess: get_user to zero out dest in case of fault

From: Linus Torvalds
Date: Fri Aug 19 2016 - 18:11:13 EST


On Fri, Aug 19, 2016 at 3:00 PM, Linus Torvalds
<torvalds@xxxxxxxxxxxxxxxxxxxx> wrote:
>
> (I have some experimental patches that actually use "asm goto" in
> "unsafe_put_user()" to get that nice code generation, but they only
> work if your gcc version supports "asm goto", which some older
> versions of gcc do not)

Since you actually are looking at the user access stuff, I'll just put
them here.
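
For reference, "asm goto" lets an asm statement name C labels it may
branch to, which is what lets the fault fixup become a plain branch
target instead of an error value in a register. A minimal stand-alone
sketch of the syntax (illustrative only, made-up names, not taken from
the patch below):

	/*
	 * User-space illustration of gcc "asm goto": the label list
	 * comes after a fourth colon and no output operands are
	 * allowed.  Here the store simply falls through; in the
	 * kernel patch the branch to the label is taken via the
	 * exception table on a fault, not by an explicit jump.
	 */
	static inline int store_int(int *p, int val)
	{
		asm goto("movl %0, %1"
			 : /* no outputs permitted with asm goto */
			 : "r" (val), "m" (*p)
			 : "memory"
			 : efault);
		return 0;
	efault:
		return -14;	/* -EFAULT */
	}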

This is from an old branch of mine, based on commit f6c658df6385 just
because that happened to be my top-of-tree when I was playing around
with it. It probably doesn't even apply right now, and as mentioned,
it depends on "asm goto" (there is no fallback for the
!CC_HAVE_ASM_GOTO case).

With this, you actually get almost perfect code generation if you then
replace all the "put_user_ex()" calls with

	if (!access_ok(..))
		return -EFAULT;

	user_access_begin();
	unsafe_put_user(x, ptr, error_label);
	unsafe_put_user(y, ptr2, error_label);
	...
	user_access_end();
	return 0;

error_label:
	user_access_end();
	return -EFAULT;

or something similar. The exception handler will jump directly to
"error_label", so there is no testing of anything at all on the usual
no-exception path, and no extra register is tied up holding an error
value.
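
To make that concrete, a converted caller would look roughly like this
(a sketch with a made-up frame layout and function name, not code from
the tree):

	struct frame {
		unsigned long flags;
		unsigned long ip;
	};

	static int write_frame(struct frame __user *frame,
			       unsigned long flags, unsigned long ip)
	{
		if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
			return -EFAULT;

		user_access_begin();
		unsafe_put_user(flags, &frame->flags, error_label);
		unsafe_put_user(ip, &frame->ip, error_label);
		user_access_end();
		return 0;

	error_label:
		user_access_end();
		return -EFAULT;
	}

On a fault in either store, control goes straight from the exception
table to "error_label"; the success path is just the two moves.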

Linus
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 2982387ba817..849debe7aa10 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -204,19 +204,14 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))


#ifdef CONFIG_X86_32
-#define __put_user_asm_u64(x, addr, err, errret) \
- asm volatile("\n" \
- "1: movl %%eax,0(%2)\n" \
- "2: movl %%edx,4(%2)\n" \
- "3:" \
- ".section .fixup,\"ax\"\n" \
- "4: movl %3,%0\n" \
- " jmp 3b\n" \
- ".previous\n" \
- _ASM_EXTABLE(1b, 4b) \
- _ASM_EXTABLE(2b, 4b) \
- : "=r" (err) \
- : "A" (x), "r" (addr), "i" (errret), "0" (err))
+#define __put_user_goto_u64(x, addr, label) \
+ asm volatile goto("\n" \
+ "1: movl %%eax,0(%1)\n" \
+ "2: movl %%edx,4(%1)\n" \
+ _ASM_EXTABLE(1b, %l2) \
+ _ASM_EXTABLE(2b, %l2) \
+ : : "A" (x), "r" (addr) \
+ : : label)

#define __put_user_asm_ex_u64(x, addr) \
asm volatile("\n" \
@@ -231,8 +226,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
asm volatile("call __put_user_8" : "=a" (__ret_pu) \
: "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
-#define __put_user_asm_u64(x, ptr, retval, errret) \
- __put_user_asm(x, ptr, retval, "q", "", "er", errret)
+#define __put_user_goto_u64(x, ptr, label) \
+ __put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_asm_ex_u64(x, addr) \
__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
@@ -293,23 +288,21 @@ extern void __put_user_8(void);
__builtin_expect(__ret_pu, 0); \
})

-#define __put_user_size(x, ptr, size, retval, errret) \
+#define __put_user_size(x, ptr, size, label) \
do { \
- retval = 0; \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
- __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
+ __put_user_goto(x, ptr, "b", "b", "iq", label); \
break; \
case 2: \
- __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
+ __put_user_goto(x, ptr, "w", "w", "ir", label); \
break; \
case 4: \
- __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
+ __put_user_goto(x, ptr, "l", "k", "ir", label); \
break; \
case 8: \
- __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
- errret); \
+ __put_user_goto_u64((__typeof__(*ptr))(x), ptr, label); \
break; \
default: \
__put_user_bad(); \
@@ -419,9 +412,12 @@ do { \

#define __put_user_nocheck(x, ptr, size) \
({ \
- int __pu_err; \
+ __label__ __pu_label; \
+ int __pu_err = -EFAULT; \
__uaccess_begin(); \
- __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
+ __put_user_size((x), (ptr), (size), __pu_label); \
+ __pu_err = 0; \
+__pu_label: \
__uaccess_end(); \
__builtin_expect(__pu_err, 0); \
})
@@ -446,17 +442,23 @@ struct __large_struct { unsigned long buf[100]; };
* we do not write to any memory gcc knows about, so there are no
* aliasing issues.
*/
-#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
- asm volatile("\n" \
- "1: mov"itype" %"rtype"1,%2\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: mov %3,%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE(1b, 3b) \
- : "=r"(err) \
- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+#define __put_user_goto(x, addr, itype, rtype, ltype, label) \
+ asm volatile goto("\n" \
+ "1: mov"itype" %"rtype"0,%1\n" \
+ _ASM_EXTABLE(1b, %l2) \
+ : : ltype(x), "m" (__m(addr)) \
+ : : label)
+
+#define __put_user_failed(x, addr, itype, rtype, ltype, errret) \
+ ({ __label__ __puflab; \
+ int __pufret = errret; \
+ __put_user_goto(x,addr,itype,rtype,ltype,__puflab); \
+ __pufret = 0; \
+ __puflab: __pufret; })
+
+#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret) do { \
+ retval = __put_user_failed(x, addr, itype, rtype, ltype, errret); \
+} while (0)

#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %"rtype"0,%1\n" \
@@ -793,12 +795,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
#define user_access_begin() __uaccess_begin()
#define user_access_end() __uaccess_end()

-#define unsafe_put_user(x, ptr) \
-({ \
- int __pu_err; \
- __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
- __builtin_expect(__pu_err, 0); \
-})
+#define unsafe_put_user(x, ptr, label) \
+ __put_user_size((x), (ptr), sizeof(*(ptr)), label)
+

#define unsafe_get_user(x, ptr) \
({ \