On Wed, Jul 10, 2024 at 11:50:50PM +0200, Uros Bizjak wrote:...
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index d3a814efbff6..d54f6002e5a0 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -253,6 +253,16 @@
.Lend_\@:
.endm
+/*
+ * Safer version of CLEAR_CPU_BUFFERS that uses %ss to reference VERW operand
+ * mds_verw_sel. This ensures VERW will not #GP for an arbitrary user %ds.
+ */
+.macro CLEAR_CPU_BUFFERS_SAFE
+ ALTERNATIVE "jmp .Lskip_verw\@", "", X86_FEATURE_CLEAR_CPU_BUF
+ verw %ss:_ASM_RIP(mds_verw_sel)
+.Lskip_verw\@:
+.endm
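
(Aside for readers following along: a VERW memory operand is referenced through %ds by default unless a segment override prefix is given, which is what the comment above is getting at. A minimal sketch of the two forms, where the first is the one that can #GP if user space has loaded an arbitrary %ds and the second is the form used in the macro above:)

	verw _ASM_RIP(mds_verw_sel)		/* implicit %ds reference */
	verw %ss:_ASM_RIP(mds_verw_sel)		/* explicit %ss override */
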
Why not simply:
.macro CLEAR_CPU_BUFFERS_SAFE
ALTERNATIVE "", __stringify(verw %ss:_ASM_RIP(mds_verw_sel)),
X86_FEATURE_CLEAR_CPU_BUF
.endm
We can do it this way as well, but there are stable kernels that don't
support relocations in ALTERNATIVEs. The way it is done in the current patch
can be backported without worrying about which kernels support relocations.
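
(To spell out the relocation point with a rough sketch, as I understand it: in
the current patch the VERW and its reference to mds_verw_sel sit in the normal
instruction stream, so the reference is an ordinary relocation resolved at
build time and the alternative only NOPs out a short jmp. In the
single-ALTERNATIVE form the VERW is the replacement text that
apply_alternatives() copies over the site at boot, and on 64-bit _ASM_RIP()
expands to a RIP-relative operand, which is only copied correctly on kernels
whose alternatives code can fix up relocations in the replacement.)

	/*
	 * Current patch: only the jmp is patched (NOPed out when the
	 * feature is set); the verw below is ordinary code and its
	 * relocation is handled at build time, not by alternatives.
	 */
	ALTERNATIVE "jmp .Lskip_verw\@", "", X86_FEATURE_CLEAR_CPU_BUF
	verw %ss:_ASM_RIP(mds_verw_sel)
.Lskip_verw\@:

	/*
	 * Suggested form: the verw itself is the replacement text, so its
	 * operand must survive being copied by apply_alternatives(), which
	 * requires relocation support in the alternatives code.
	 */
	ALTERNATIVE "", __stringify(verw %ss:_ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
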