[tip: x86/asm] x86/boot: Annotate local functions

From: tip-bot2 for Jiri Slaby
Date: Fri Oct 18 2019 - 12:33:13 EST


The following commit has been merged into the x86/asm branch of tip:

Commit-ID: deff8a24e1021fb39dddf5f6bc5832e0e3a632ea
Gitweb: https://git.kernel.org/tip/deff8a24e1021fb39dddf5f6bc5832e0e3a632ea
Author: Jiri Slaby <jslaby@xxxxxxx>
AuthorDate: Fri, 11 Oct 2019 13:50:47 +02:00
Committer: Borislav Petkov <bp@xxxxxxx>
CommitterDate: Fri, 18 Oct 2019 10:29:34 +02:00

x86/boot: Annotate local functions

.Lrelocated, .Lpaging_enabled, .Lno_longmode, and .Lin_pm32 are
self-standing local functions, so annotate them as such with
SYM_FUNC_START_LOCAL_NOALIGN()/SYM_FUNC_END(), preserving their "no
alignment" (they were plain labels before, and no alignment is added).

The annotations do not generate anything yet.
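For readers new to the SYM_* macro family: the following is a rough
reconstruction of the relevant include/linux/linkage.h definitions
introduced earlier in this series, for illustration only (see that
patch for the authoritative text):

  /* SYM_A_NONE -- do not align the symbol */
  #define SYM_A_NONE                              /* nothing */

  /* SYM_L_LOCAL -- no .globl, the symbol stays file-local */
  #define SYM_L_LOCAL(name)                       /* nothing */

  #define SYM_ENTRY(name, linkage, align...)      \
          linkage(name) ASM_NL                    \
          align ASM_NL                            \
          name:

  #define SYM_START(name, linkage, align...)      \
          SYM_ENTRY(name, linkage, align)

  /* local function, no forced alignment */
  #define SYM_FUNC_START_LOCAL_NOALIGN(name)      \
          SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)

  #define SYM_END(name, sym_type)                 \
          .type name sym_type ASM_NL              \
          .size name, .-name

  #define SYM_FUNC_END(name)                      \
          SYM_END(name, SYM_T_FUNC)

If this reading is right, SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
expands to a plain ".Lrelocated:" label, and because .L-prefixed
symbols are assembler-local and never reach the object file's symbol
table, the .type/.size directives emitted by SYM_FUNC_END() are
dropped as well -- hence "do not generate anything yet".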

Signed-off-by: Jiri Slaby <jslaby@xxxxxxx>
Signed-off-by: Borislav Petkov <bp@xxxxxxx>
Cc: Cao jin <caoj.fnst@xxxxxxxxxxxxxx>
Cc: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Kate Stewart <kstewart@xxxxxxxxxxxxxxxxxxx>
Cc: "Kirill A. Shutemov" <kirill.shutemov@xxxxxxxxxxxxxxx>
Cc: linux-arch@xxxxxxxxxxxxxxx
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Wei Huang <wei@xxxxxxxxxx>
Cc: x86-ml <x86@xxxxxxxxxx>
Cc: Xiaoyao Li <xiaoyao.li@xxxxxxxxxxxxxxx>
Link: https://lkml.kernel.org/r/20191011115108.12392-8-jslaby@xxxxxxx
---
 arch/x86/boot/compressed/head_32.S | 3 ++-
 arch/x86/boot/compressed/head_64.S | 9 ++++++---
 arch/x86/boot/pmjump.S             | 4 ++--
 3 files changed, 10 insertions(+), 6 deletions(-)
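A side note on the pmjump.S hunk below: .Lin_pm32 was already closed
with ENDPROC(), and SYM_FUNC_END() is its direct replacement. Sketched
from the same linkage.h (again a reconstruction, for illustration
only), the two emit essentially the same directives:

  /* old macros */
  #define END(name)               .size name, .-name
  #define ENDPROC(name)           .type name, @function ASM_NL END(name)

  /* new macros (SYM_T_FUNC is STT_FUNC) */
  #define SYM_END(name, sym_type) .type name sym_type ASM_NL .size name, .-name
  #define SYM_FUNC_END(name)      SYM_END(name, SYM_T_FUNC)

So that hunk swaps macro families without changing what the assembler
sees; for the other three labels SYM_FUNC_END() is new, but as noted
above it generates nothing for .L symbols.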

diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 5e30eaa..f9e2a80 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -209,7 +209,7 @@ ENDPROC(efi32_stub_entry)
 #endif
 
 	.text
-.Lrelocated:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 
 /*
  * Clear BSS (stack is currently empty)
@@ -260,6 +260,7 @@ ENDPROC(efi32_stub_entry)
  */
 	xorl	%ebx, %ebx
 	jmp	*%eax
+SYM_FUNC_END(.Lrelocated)
 
 #ifdef CONFIG_EFI_STUB
 	.data
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index d98cd48..7afe6e0 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -511,7 +511,7 @@ ENDPROC(efi64_stub_entry)
 #endif
 
 	.text
-.Lrelocated:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 
 /*
  * Clear BSS (stack is currently empty)
@@ -540,6 +540,7 @@ ENDPROC(efi64_stub_entry)
  * Jump to the decompressed kernel.
  */
 	jmp	*%rax
+SYM_FUNC_END(.Lrelocated)
 
 /*
  * Adjust the global offset table
@@ -635,9 +636,10 @@ ENTRY(trampoline_32bit_src)
 	lret
 
 	.code64
-.Lpaging_enabled:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
 	/* Return from the trampoline */
 	jmp	*%rdi
+SYM_FUNC_END(.Lpaging_enabled)
 
 	/*
 	 * The trampoline code has a size limit.
@@ -647,11 +649,12 @@ ENTRY(trampoline_32bit_src)
 	.org	trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
 
 	.code32
-.Lno_longmode:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
 	/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
 1:
 	hlt
 	jmp	1b
+SYM_FUNC_END(.Lno_longmode)
 
 #include "../../kernel/verify_cpu.S"
 
diff --git a/arch/x86/boot/pmjump.S b/arch/x86/boot/pmjump.S
index ea88d52..81658fe 100644
--- a/arch/x86/boot/pmjump.S
+++ b/arch/x86/boot/pmjump.S
@@ -46,7 +46,7 @@ ENDPROC(protected_mode_jump)

 	.code32
 	.section ".text32","ax"
-.Lin_pm32:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lin_pm32)
 	# Set up data segments for flat 32-bit mode
 	movl	%ecx, %ds
 	movl	%ecx, %es
@@ -72,4 +72,4 @@ ENDPROC(protected_mode_jump)
 	lldt	%cx
 
 	jmpl	*%eax			# Jump to the 32-bit entrypoint
-ENDPROC(.Lin_pm32)
+SYM_FUNC_END(.Lin_pm32)