[RFC 07/17] x86/asm/64: Merge the fast and slow SYSRET paths
From: Andy Lutomirski
Date: Wed Sep 06 2017 - 17:39:49 EST
The fast-path SYSRET exit duplicated the tail of the slow path: it
restored the C registers (except RCX and R11, already loaded for
SYSRET), switched to the user RSP, and executed SYSRET64.  The slow
path's `syscall_return_via_sysret` sequence pops those same registers
one by one.  Instead of keeping two copies, point the fast path at the
slow path's pop sequence via a new `.Lpop_c_regs_and_sysret` label,
after skipping the six stack slots the fast path does not need to pop.
The two exits now share a single SYSRET sequence, shrinking the entry
code and reducing the number of paths to keep in sync.

Signed-off-by: Andy Lutomirski <luto@xxxxxxxxxx>
---
arch/x86/entry/entry_64.S | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 45d4cb8fd81b..a9e318f7cc9b 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -216,9 +216,8 @@ entry_SYSCALL_64_fastpath:
TRACE_IRQS_ON /* user mode is traced as IRQs on */
movq RIP(%rsp), %rcx
movq EFLAGS(%rsp), %r11
- RESTORE_C_REGS_EXCEPT_RCX_R11
- movq RSP(%rsp), %rsp
- USERGS_SYSRET64
+ addq $6*8, %rsp
+ jmp .Lpop_c_regs_and_sysret
1:
/*
@@ -309,6 +308,7 @@ return_from_SYSCALL_64:
syscall_return_via_sysret:
/* rcx and r11 are already restored (see code above) */
POP_EXTRA_REGS
+.Lpop_c_regs_and_sysret:
popq %rsi /* skip r11 */
popq %r10
popq %r9
--
2.13.5