[PATCH] x86/acpi: Remove indirect jump from wakeup_long64()

From: Brian Gerst

Date: Sun Jan 18 2026 - 12:05:20 EST


wakeup_long64() is called from common_startup_64() via initial_code, so
it is already running on the normal virtual mapping. There is no need
to use an indirect jump since it is not switching mappings.

Remove the indirect jump by embedding wakeup_long64() as an inner label
of do_suspend_lowlevel(). Remove saved_rip, which is now unused.

No functional change.

Signed-off-by: Brian Gerst <brgerst@xxxxxxxxx>
Cc: Ard Biesheuvel <ardb@xxxxxxxxxx>
Cc: "Rafael J. Wysocki" <rafael@xxxxxxxxxx>
---
arch/x86/kernel/acpi/wakeup_64.S | 64 +++++++++++++-------------------
1 file changed, 26 insertions(+), 38 deletions(-)

diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 04f561f75e99..a256cdd03ab5 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -13,39 +13,6 @@
# Copyright 2003 Pavel Machek <pavel@xxxxxxx

.code64
- /*
- * Hooray, we are in Long 64-bit mode (but still running in low memory)
- */
-SYM_FUNC_START(wakeup_long64)
- ANNOTATE_NOENDBR
- movq saved_magic(%rip), %rax
- movq $0x123456789abcdef0, %rdx
- cmpq %rdx, %rax
- je 2f
-
- /* stop here on a saved_magic mismatch */
- movq $0xbad6d61676963, %rcx
-1:
- jmp 1b
-2:
- movw $__KERNEL_DS, %ax
- movw %ax, %ss
- movw %ax, %ds
- movw %ax, %es
- movw %ax, %fs
- movw %ax, %gs
- movq saved_rsp(%rip), %rsp
-
- movq saved_rbx(%rip), %rbx
- movq saved_rdi(%rip), %rdi
- movq saved_rsi(%rip), %rsi
- movq saved_rbp(%rip), %rbp
-
- movq saved_rip(%rip), %rax
- ANNOTATE_RETPOLINE_SAFE
- jmp *%rax
-SYM_FUNC_END(wakeup_long64)
-
SYM_FUNC_START(do_suspend_lowlevel)
FRAME_BEGIN
subq $8, %rsp
@@ -71,8 +38,6 @@ SYM_FUNC_START(do_suspend_lowlevel)
pushfq
popq pt_regs_flags(%rax)

- movq $.Lresume_point, saved_rip(%rip)
-
movq %rsp, saved_rsp(%rip)
movq %rbp, saved_rbp(%rip)
movq %rbx, saved_rbx(%rip)
@@ -86,9 +51,27 @@ SYM_FUNC_START(do_suspend_lowlevel)
/* in case something went wrong, restore the machine status and go on */
jmp .Lresume_point

- .align 4
-.Lresume_point:
+SYM_INNER_LABEL_ALIGN(wakeup_long64, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
+ movq saved_magic(%rip), %rax
+ movq $0x123456789abcdef0, %rdx
+ cmpq %rdx, %rax
+ jne .Lbad_saved_magic
+
+ movw $__KERNEL_DS, %ax
+ movw %ax, %ss
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %fs
+ movw %ax, %gs
+ movq saved_rsp(%rip), %rsp
+
+ movq saved_rbx(%rip), %rbx
+ movq saved_rdi(%rip), %rdi
+ movq saved_rsi(%rip), %rsi
+ movq saved_rbp(%rip), %rbp
+
+.Lresume_point:
/* We don't restore %rax, it must be 0 anyway */
movq $saved_context, %rax
movq saved_context_cr4(%rax), %rbx
@@ -130,6 +113,12 @@ SYM_FUNC_START(do_suspend_lowlevel)
addq $8, %rsp
FRAME_END
jmp restore_processor_state
+
+.Lbad_saved_magic:
+ /* stop here on a saved_magic mismatch */
+ movq $0xbad6d61676963, %rcx
+1:
+ jmp 1b
SYM_FUNC_END(do_suspend_lowlevel)
STACK_FRAME_NON_STANDARD do_suspend_lowlevel

@@ -139,7 +128,6 @@ saved_rsi: .quad 0
saved_rdi: .quad 0
saved_rbx: .quad 0

-saved_rip: .quad 0
saved_rsp: .quad 0

SYM_DATA(saved_magic, .quad 0)

base-commit: 72249a0533c63e77e4bf56012b7b4f8fb3066317
--
2.52.0