[PATCH v3 1/6] x86/bugs: Rename entry_ibpb()

From: Josh Poimboeuf
Date: Wed Apr 02 2025 - 14:20:15 EST


There's nothing entry-specific about entry_ibpb(). In preparation for
calling it from elsewhere, rename it to __write_ibpb().

Signed-off-by: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
---
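For reference, a rough C-level sketch of what __write_ibpb() does
(illustrative only, not part of this patch; names below are hypothetical):

	/*
	 * Hypothetical C equivalent of the asm routine: issue an indirect
	 * branch prediction barrier by writing PRED_CMD_IBPB to the
	 * IA32_PRED_CMD MSR.
	 */
	static inline void write_ibpb_sketch(void)
	{
		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);

		/*
		 * The asm version additionally refills the RSB
		 * (FILL_RETURN_BUFFER) for CPUs where IBPB does not clear
		 * return stack predictions (X86_BUG_IBPB_NO_RET).
		 */
	}
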
arch/x86/entry/entry.S | 7 ++++---
arch/x86/include/asm/nospec-branch.h | 6 +++---
arch/x86/kernel/cpu/bugs.c | 6 +++---
3 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
index d3caa31240ed..3a53319988b9 100644
--- a/arch/x86/entry/entry.S
+++ b/arch/x86/entry/entry.S
@@ -17,7 +17,8 @@

.pushsection .noinstr.text, "ax"

-SYM_FUNC_START(entry_ibpb)
+// Clobbers AX, CX, DX
+SYM_FUNC_START(__write_ibpb)
ANNOTATE_NOENDBR
movl $MSR_IA32_PRED_CMD, %ecx
movl $PRED_CMD_IBPB, %eax
@@ -27,9 +28,9 @@ SYM_FUNC_START(entry_ibpb)
/* Make sure IBPB clears return stack predictions too. */
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
RET
-SYM_FUNC_END(entry_ibpb)
+SYM_FUNC_END(__write_ibpb)
/* For KVM */
-EXPORT_SYMBOL_GPL(entry_ibpb);
+EXPORT_SYMBOL_GPL(__write_ibpb);

.popsection

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 8a5cc8e70439..bbac79cad04c 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -269,7 +269,7 @@
* typically has NO_MELTDOWN).
*
* While retbleed_untrain_ret() doesn't clobber anything but requires stack,
- * entry_ibpb() will clobber AX, CX, DX.
+ * __write_ibpb() will clobber AX, CX, DX.
*
* As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
* where we have a stack but before any RET instruction.
@@ -279,7 +279,7 @@
VALIDATE_UNRET_END
CALL_UNTRAIN_RET
ALTERNATIVE_2 "", \
- "call entry_ibpb", \ibpb_feature, \
+ "call __write_ibpb", \ibpb_feature, \
__stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
.endm
@@ -368,7 +368,7 @@ extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);

extern void entry_untrain_ret(void);
-extern void entry_ibpb(void);
+extern void __write_ibpb(void);

#ifdef CONFIG_X86_64
extern void clear_bhb_loop(void);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4386aa6c69e1..310cb3f7139c 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1142,7 +1142,7 @@ static void __init retbleed_select_mitigation(void)
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);

/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: __write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
@@ -2676,7 +2676,7 @@ static void __init srso_select_mitigation(void)
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);

/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: __write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
@@ -2701,7 +2701,7 @@ static void __init srso_select_mitigation(void)
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;

/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: __write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
--
2.48.1