[PATCH 4.14 060/167] x86/entry/64: Interleave XOR register clearing with PUSH instructions
From: Greg Kroah-Hartman
Date: Wed Feb 21 2018 - 09:13:54 EST
4.14-stable review patch. If anyone has any objections, please let me know.
------------------
From: Dominik Brodowski <linux@xxxxxxxxxxxxxxxxxxxx>
commit f7bafa2b05ef25eda1d9179fd930b0330cf2b7d1 upstream.
Same as is done for syscalls, interleave XOR with PUSH instructions
for exceptions/interrupts, in order to minimize the cost of the
additional instructions required for register clearing.
Signed-off-by: Dominik Brodowski <linux@xxxxxxxxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Brian Gerst <brgerst@xxxxxxxxx>
Cc: Denys Vlasenko <dvlasenk@xxxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: dan.j.williams@xxxxxxxxx
Link: http://lkml.kernel.org/r/20180211104949.12992-4-linux@xxxxxxxxxxxxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
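Reviewer note (not part of the patch): the net effect of the change is sketched below, based directly on the hunks that follow. Before this patch the registers were saved in one block and then zeroed in a separate CLEAR_REGS_NOSPEC pass; afterwards each clearing XOR sits immediately behind the PUSH/MOV that saves that register, so the zeroing uops can be scheduled in the shadow of the stores.

	/* before: save everything, then clear in a separate pass */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	...
	xorq	%r12, %r12
	xorq	%r13, %r13

	/* after: clear each register right after it is saved */
	pushq	%r12		/* pt_regs->r12 */
	xorq	%r12, %r12	/* nospec r12 */
	pushq	%r13		/* pt_regs->r13 */
	xorq	%r13, %r13	/* nospec r13 */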
arch/x86/entry/calling.h | 40 +++++++++++++++++++---------------------
arch/x86/entry/entry_64.S | 30 +++++++++++++++++++++---------
2 files changed, 40 insertions(+), 30 deletions(-)
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -101,44 +101,42 @@ For 32-bit we have the following convent
addq $-(15*8), %rsp
.endm
- .macro SAVE_REGS offset=0
+ .macro SAVE_AND_CLEAR_REGS offset=0
+ /*
+ * Save registers and sanitize registers of values that a
+ * speculation attack might otherwise want to exploit. The
+ * lower registers are likely clobbered well before they
+ * could be put to use in a speculative execution gadget.
+ * Interleave XOR with PUSH for better uop scheduling:
+ */
movq %rdi, 14*8+\offset(%rsp)
movq %rsi, 13*8+\offset(%rsp)
movq %rdx, 12*8+\offset(%rsp)
movq %rcx, 11*8+\offset(%rsp)
movq %rax, 10*8+\offset(%rsp)
movq %r8, 9*8+\offset(%rsp)
+ xorq %r8, %r8 /* nospec r8 */
movq %r9, 8*8+\offset(%rsp)
+ xorq %r9, %r9 /* nospec r9 */
movq %r10, 7*8+\offset(%rsp)
+ xorq %r10, %r10 /* nospec r10 */
movq %r11, 6*8+\offset(%rsp)
+ xorq %r11, %r11 /* nospec r11 */
movq %rbx, 5*8+\offset(%rsp)
+ xorl %ebx, %ebx /* nospec rbx */
movq %rbp, 4*8+\offset(%rsp)
+ xorl %ebp, %ebp /* nospec rbp */
movq %r12, 3*8+\offset(%rsp)
+ xorq %r12, %r12 /* nospec r12 */
movq %r13, 2*8+\offset(%rsp)
+ xorq %r13, %r13 /* nospec r13 */
movq %r14, 1*8+\offset(%rsp)
+ xorq %r14, %r14 /* nospec r14 */
movq %r15, 0*8+\offset(%rsp)
+ xorq %r15, %r15 /* nospec r15 */
UNWIND_HINT_REGS offset=\offset
.endm
- /*
- * Sanitize registers of values that a speculation attack
- * might otherwise want to exploit. The lower registers are
- * likely clobbered well before they could be put to use in
- * a speculative execution gadget:
- */
- .macro CLEAR_REGS_NOSPEC
- xorl %ebp, %ebp
- xorl %ebx, %ebx
- xorq %r8, %r8
- xorq %r9, %r9
- xorq %r10, %r10
- xorq %r11, %r11
- xorq %r12, %r12
- xorq %r13, %r13
- xorq %r14, %r14
- xorq %r15, %r15
- .endm
-
.macro POP_REGS pop_rdi=1 skip_r11rcx=0
popq %r15
popq %r14
@@ -177,7 +175,7 @@ For 32-bit we have the following convent
* is just setting the LSB, which makes it an invalid stack address and is also
* a signal to the unwinder that it's a pt_regs pointer in disguise.
*
- * NOTE: This macro must be used *after* SAVE_REGS because it corrupts
+ * NOTE: This macro must be used *after* SAVE_AND_CLEAR_REGS because it corrupts
* the original rbp.
*/
.macro ENCODE_FRAME_POINTER ptregs_offset=0
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -561,8 +561,7 @@ END(irq_entries_start)
1:
ALLOC_PT_GPREGS_ON_STACK
- SAVE_REGS
- CLEAR_REGS_NOSPEC
+ SAVE_AND_CLEAR_REGS
ENCODE_FRAME_POINTER
testb $3, CS(%rsp)
@@ -1108,8 +1107,7 @@ ENTRY(xen_failsafe_callback)
UNWIND_HINT_IRET_REGS
pushq $-1 /* orig_ax = -1 => not a system call */
ALLOC_PT_GPREGS_ON_STACK
- SAVE_REGS
- CLEAR_REGS_NOSPEC
+ SAVE_AND_CLEAR_REGS
ENCODE_FRAME_POINTER
jmp error_exit
END(xen_failsafe_callback)
@@ -1153,8 +1151,7 @@ idtentry machine_check do_mce has_err
ENTRY(paranoid_entry)
UNWIND_HINT_FUNC
cld
- SAVE_REGS 8
- CLEAR_REGS_NOSPEC
+ SAVE_AND_CLEAR_REGS 8
ENCODE_FRAME_POINTER 8
movl $1, %ebx
movl $MSR_GS_BASE, %ecx
@@ -1205,8 +1202,7 @@ END(paranoid_exit)
ENTRY(error_entry)
UNWIND_HINT_FUNC
cld
- SAVE_REGS 8
- CLEAR_REGS_NOSPEC
+ SAVE_AND_CLEAR_REGS 8
ENCODE_FRAME_POINTER 8
testb $3, CS+8(%rsp)
jz .Lerror_kernelspace
@@ -1393,18 +1389,34 @@ ENTRY(nmi)
pushq (%rdx) /* pt_regs->dx */
pushq %rcx /* pt_regs->cx */
pushq %rax /* pt_regs->ax */
+ /*
+ * Sanitize registers of values that a speculation attack
+ * might otherwise want to exploit. The lower registers are
+ * likely clobbered well before they could be put to use in
+ * a speculative execution gadget. Interleave XOR with PUSH
+ * for better uop scheduling:
+ */
pushq %r8 /* pt_regs->r8 */
+ xorq %r8, %r8 /* nospec r8 */
pushq %r9 /* pt_regs->r9 */
+ xorq %r9, %r9 /* nospec r9 */
pushq %r10 /* pt_regs->r10 */
+ xorq %r10, %r10 /* nospec r10 */
pushq %r11 /* pt_regs->r11 */
+ xorq %r11, %r11 /* nospec r11*/
pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx*/
pushq %rbp /* pt_regs->rbp */
+ xorl %ebp, %ebp /* nospec rbp*/
pushq %r12 /* pt_regs->r12 */
+ xorq %r12, %r12 /* nospec r12*/
pushq %r13 /* pt_regs->r13 */
+ xorq %r13, %r13 /* nospec r13*/
pushq %r14 /* pt_regs->r14 */
+ xorq %r14, %r14 /* nospec r14*/
pushq %r15 /* pt_regs->r15 */
+ xorq %r15, %r15 /* nospec r15*/
UNWIND_HINT_REGS
- CLEAR_REGS_NOSPEC
ENCODE_FRAME_POINTER
/*