arch/x86/include/asm/fpu/internal.h | 55 +++++++++++++++++-------------------- arch/x86/kernel/fpu/xstate.c | 11 ++++---- 2 files changed, 31 insertions(+), 35 deletions(-) diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 42159f45bf9c..32eb42cae07b 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -211,18 +211,12 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) #define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f" #define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f" -#define XSTATE_OP(op, st, lmask, hmask, err) \ - asm volatile("1:" op "\n\t" \ - "xor %[err], %[err]\n" \ - "2:\n\t" \ - ".pushsection .fixup,\"ax\"\n\t" \ - "3: movl $-2,%[err]\n\t" \ - "jmp 2b\n\t" \ - ".popsection\n\t" \ - _ASM_EXTABLE(1b, 3b) \ - : [err] "=r" (err) \ - : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ - : "memory") +#define XSTATE_OP(op, st, lmask, hmask, label) \ + asm_volatile_goto("1:" op "\n\t" \ + _ASM_EXTABLE(1b, %l4) \ + : /* no outputs */ \ + : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ + : "memory" : label) /* * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact @@ -277,7 +271,6 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate) u64 mask = -1; u32 lmask = mask; u32 hmask = mask >> 32; - int err; WARN_ON(system_state != SYSTEM_BOOTING); @@ -285,9 +278,11 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate) XSTATE_OP(XSAVES, xstate, lmask, hmask, err); else XSTATE_OP(XSAVE, xstate, lmask, hmask, err); + return; +err: /* We should never fault when copying to a kernel buffer: */ - WARN_ON_FPU(err); + WARN_ON_FPU(1); } /* @@ -299,7 +294,6 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate) u64 mask = -1; u32 lmask = mask; u32 hmask = mask >> 32; - int err; WARN_ON(system_state != SYSTEM_BOOTING); @@ -307,12 +301,10 @@ static inline void copy_kernel_to_xregs_booting(struct 
xregs_state *xstate) XSTATE_OP(XRSTORS, xstate, lmask, hmask, err); else XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); + return; - /* - * We should never fault when copying from a kernel buffer, and the FPU - * state we set at boot time should be valid. - */ - WARN_ON_FPU(err); +err: + WARN_ON_FPU(1); } /* @@ -356,21 +348,21 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask) */ static inline int copy_xregs_to_user(struct xregs_state __user *buf) { - int err; - /* * Clear the xsave header first, so that reserved fields are * initialized to zero. */ - err = __clear_user(&buf->header, sizeof(buf->header)); - if (unlikely(err)) + if (unlikely(__clear_user(&buf->header, sizeof(buf->header)))) return -EFAULT; stac(); XSTATE_OP(XSAVE, buf, -1, -1, err); clac(); - return err; + return 0; +err: + clac(); + return -EFAULT; } /* @@ -381,13 +373,14 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask) { struct xregs_state *xstate = ((__force struct xregs_state *)buf); u32 lmask = mask; u32 hmask = mask >> 32; - int err; stac(); XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); clac(); - - return err; + return 0; +err: + clac(); + return -EFAULT; } /* @@ -398,14 +391,16 @@ static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask) { u32 lmask = mask; u32 hmask = mask >> 32; - int err; if (static_cpu_has(X86_FEATURE_XSAVES)) XSTATE_OP(XRSTORS, xstate, lmask, hmask, err); else XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); - return err; + return 0; +err: + /* A fault on XRSTOR(S) means the buffer holds invalid FPU state, so report -EINVAL. 
*/ + return -EINVAL; } /* diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index bda2e5eaca0e..187ed0630b4b 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -1311,7 +1311,7 @@ void copy_supervisor_to_kernel(struct xregs_state *xstate) struct xstate_header *header; u64 max_bit, min_bit; u32 lmask, hmask; - int err, i; + int i; if (WARN_ON(!boot_cpu_has(X86_FEATURE_XSAVES))) return; @@ -1326,10 +1326,6 @@ void copy_supervisor_to_kernel(struct xregs_state *xstate) hmask = xfeatures_mask_supervisor() >> 32; XSTATE_OP(XSAVES, xstate, lmask, hmask, err); - /* We should never fault when copying to a kernel buffer: */ - if (WARN_ON_FPU(err)) - return; - /* * At this point, the buffer has only supervisor states and must be * converted back to normal kernel format. @@ -1354,6 +1350,11 @@ void copy_supervisor_to_kernel(struct xregs_state *xstate) xbuf + xstate_supervisor_only_offsets[i], xstate_sizes[i]); } + return; + +err: + /* We should never fault when copying to a kernel buffer: */ + WARN_ON_FPU(1); } #ifdef CONFIG_PROC_PID_ARCH_STATUS