Re: [PATCH] Convert filldir[64]() from __put_user() to unsafe_put_user()
From: Linus Torvalds
Date: Thu Oct 10 2019 - 20:31:35 EST
On Thu, Oct 10, 2019 at 5:11 PM Al Viro <viro@xxxxxxxxxxxxxxxxxx> wrote:
>
> On Thu, Oct 10, 2019 at 03:12:49PM -0700, Linus Torvalds wrote:
>
> > But I've not gotten around to rewriting those disgusting sequences to
> > the unsafe_get/put_user() model. I did look at it, and it requires
> > some changes exactly *because* the _ex() functions are broken and
> > continue, but also because the current code ends up also doing other
> > things inside the try/catch region that you're not supposed to do in a
> > user_access_begin/end() region.
>
> Hmm... Which one was that? AFAICS, we have
> do_sys_vm86: only get_user_ex()
> restore_sigcontext(): get_user_ex(), set_user_gs()
> ia32_restore_sigcontext(): get_user_ex()
Try this patch.
It works fine (well, it worked fine the last time I tried this, I
might have screwed something up just now: I re-created the patch since
I hadn't saved it).
It's nice and clean, and does
1 file changed, 9 insertions(+), 91 deletions(-)
by just deleting all the nasty *_ex() macros entirely, replacing them
with unsafe_get/put_user() calls.
And now those try/catch regions actually work like try/catch regions,
and a fault branches to the catch.
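
To make that concrete, a caller now looks roughly like this (made-up
struct and field names, purely for illustration, not the actual
sigcontext code):

	int err = 0;

	get_user_try {
		/* a fault in any of these jumps straight to the catch ... */
		get_user_ex(ax, &sc->ax);
		/* ... so this one isn't even attempted if the first one faulted */
		get_user_ex(ds, &sc->ds);
	} get_user_catch(err);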
BUT.
It does change semantics, and you get warnings like
arch/x86/ia32/ia32_signal.c: In function ‘ia32_restore_sigcontext’:
arch/x86/ia32/ia32_signal.c:114:9: warning: ‘buf’ may be used uninitialized in this function [-Wmaybe-uninitialized]
  114 |  err |= fpu__restore_sig(buf, 1);
      |         ^~~~~~~~~~~~~~~~~~~~~~~~
arch/x86/ia32/ia32_signal.c:64:27: warning: ‘ds’ may be used uninitialized in this function [-Wmaybe-uninitialized]
   64 |   unsigned int pre = (seg) | 3;  \
      |                ^
arch/x86/ia32/ia32_signal.c:74:18: note: ‘ds’ was declared here
...
arch/x86/kernel/signal.c: In function ‘restore_sigcontext’:
arch/x86/kernel/signal.c:152:9: warning: ‘buf’ may be used uninitialized in this function [-Wmaybe-uninitialized]
  152 |  err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
      |         ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
because it's true: those things really may not be initialized, because
the catch thing could have jumped out.
So the code actually needs to properly return the error early, or
initialize the segments that didn't get loaded to 0, or something.
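
Something like this, i.e. either give the locals a defined value up
front, or bail out before anything uses them (hypothetical field names,
just to show the shape of the fix):

	unsigned int ds = 0, es = 0;	/* defined even if we fault early */

	get_user_try {
		get_user_ex(ds, &sc->ds);
		get_user_ex(es, &sc->es);
	} get_user_catch(err);

	if (err)
		return -EFAULT;		/* ... or just return before using them */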
And when I posted that, Luto said "just get rid of get_user_ex()
entirely, instead of changing the semantics of the existing ones to be
sane".
Which is probably right. There aren't that many.
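
For the cases that remain, the conversion at a call site would look
something like the below (a sketch with a made-up frame structure and
helper, not the real signal code, just to show the open-coded
unsafe_get_user() form):

	struct xyz_frame {
		unsigned long ax;
		unsigned short ds;
	};

	static int xyz_restore(const struct xyz_frame __user *frame,
			       unsigned long *ax, unsigned short *ds)
	{
		if (!user_access_begin(frame, sizeof(*frame)))
			return -EFAULT;
		unsafe_get_user(*ax, &frame->ax, efault);
		unsafe_get_user(*ds, &frame->ds, efault);
		user_access_end();
		return 0;

	efault:
		/* any faulting access above lands here */
		user_access_end();
		return -EFAULT;
	}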
I *thought* there were also cases of us doing some questionable things
inside the get_user_try sections, but those seem to have gotten fixed
already independently, so it's really just the "make try/catch really
try/catch" change that needs some editing of our current broken stuff
that depends on it not actually *catching* exceptions, but on just
continuing on to the next one.
Linus
arch/x86/include/asm/uaccess.h | 100 ++++-------------------------------------
1 file changed, 9 insertions(+), 91 deletions(-)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 61d93f062a36..e87d8911dc53 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -193,23 +193,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
: : "A" (x), "r" (addr) \
: : label)
-#define __put_user_asm_ex_u64(x, addr) \
- asm volatile("\n" \
- "1: movl %%eax,0(%1)\n" \
- "2: movl %%edx,4(%1)\n" \
- "3:" \
- _ASM_EXTABLE_EX(1b, 2b) \
- _ASM_EXTABLE_EX(2b, 3b) \
- : : "A" (x), "r" (addr))
-
#define __put_user_x8(x, ptr, __ret_pu) \
asm volatile("call __put_user_8" : "=a" (__ret_pu) \
: "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
__put_user_goto(x, ptr, "q", "", "er", label)
-#define __put_user_asm_ex_u64(x, addr) \
- __put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif
@@ -289,31 +278,6 @@ do { \
} \
} while (0)
-/*
- * This doesn't do __uaccess_begin/end - the exception handling
- * around it must do that.
- */
-#define __put_user_size_ex(x, ptr, size) \
-do { \
- __chk_user_ptr(ptr); \
- switch (size) { \
- case 1: \
- __put_user_asm_ex(x, ptr, "b", "b", "iq"); \
- break; \
- case 2: \
- __put_user_asm_ex(x, ptr, "w", "w", "ir"); \
- break; \
- case 4: \
- __put_user_asm_ex(x, ptr, "l", "k", "ir"); \
- break; \
- case 8: \
- __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
- break; \
- default: \
- __put_user_bad(); \
- } \
-} while (0)
-
#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret) \
({ \
@@ -334,13 +298,9 @@ do { \
: "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
"i" (errret), "0" (retval)); \
})
-
-#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
-#define __get_user_asm_ex_u64(x, ptr) \
- __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif
#define __get_user_size(x, ptr, size, retval, errret) \
@@ -390,41 +350,6 @@ do { \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
-/*
- * This doesn't do __uaccess_begin/end - the exception handling
- * around it must do that.
- */
-#define __get_user_size_ex(x, ptr, size) \
-do { \
- __chk_user_ptr(ptr); \
- switch (size) { \
- case 1: \
- __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
- break; \
- case 2: \
- __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
- break; \
- case 4: \
- __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
- break; \
- case 8: \
- __get_user_asm_ex_u64(x, ptr); \
- break; \
- default: \
- (x) = __get_user_bad(); \
- } \
-} while (0)
-
-#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3:xor"itype" %"rtype"0,%"rtype"0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE_EX(1b, 3b) \
- : ltype(x) : "m" (__m(addr)))
-
#define __put_user_nocheck(x, ptr, size) \
({ \
__label__ __pu_label; \
@@ -480,27 +405,25 @@ struct __large_struct { unsigned long buf[100]; };
retval = __put_user_failed(x, addr, itype, rtype, ltype, errret); \
} while (0)
-#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
- "2:\n" \
- _ASM_EXTABLE_EX(1b, 2b) \
- : : ltype(x), "m" (__m(addr)))
-
/*
* uaccess_try and catch
*/
#define uaccess_try do { \
- current->thread.uaccess_err = 0; \
+ __label__ __uaccess_catch_efault; \
__uaccess_begin(); \
barrier();
#define uaccess_try_nospec do { \
- current->thread.uaccess_err = 0; \
+ __label__ __uaccess_catch_efault; \
__uaccess_begin_nospec(); \
#define uaccess_catch(err) \
__uaccess_end(); \
- (err) |= (current->thread.uaccess_err ? -EFAULT : 0); \
+ (err) = 0; \
+ break; \
+__uaccess_catch_efault: \
+ __uaccess_end(); \
+ (err) = -EFAULT; \
} while (0)
/**
@@ -562,17 +485,12 @@ struct __large_struct { unsigned long buf[100]; };
#define get_user_try uaccess_try_nospec
#define get_user_catch(err) uaccess_catch(err)
-#define get_user_ex(x, ptr) do { \
- unsigned long __gue_val; \
- __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
- (x) = (__force __typeof__(*(ptr)))__gue_val; \
-} while (0)
+#define get_user_ex(x, ptr) unsafe_get_user(x, ptr, __uaccess_catch_efault)
#define put_user_try uaccess_try
#define put_user_catch(err) uaccess_catch(err)
-#define put_user_ex(x, ptr) \
- __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#define put_user_ex(x, ptr) unsafe_put_user(x, ptr, __uaccess_catch_efault)
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);