[PATCH 3/3] kprobes/x86: Simplify indirect-jump check in retpoline

From: Zhenzhong Duan
Date: Tue Oct 30 2018 - 02:55:49 EST


Since CONFIG_RETPOLINE now hard-depends on compiler support, every
indirect jump is emitted as a retpoline thunk call when it is enabled,
so the opcode-based indirect-jump check can safely be replaced by the
thunk range check in that case.
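
For background only (not part of this patch): under CONFIG_RETPOLINE a
jump counts as indirect iff its target lands inside the thunk section
bounded by __indirect_thunk_start/__indirect_thunk_end, which is what
insn_jump_into_range() tests. A minimal stand-alone sketch of that range
test, using a hypothetical target_in_range() helper and made-up section
addresses:

  /* Sketch of the range test; addresses below are invented for the demo. */
  #include <stdio.h>

  static int target_in_range(unsigned long target,
                             unsigned long start, unsigned long len)
  {
          /* Treat the jump as "indirect" iff it lands in [start, start+len). */
          return target >= start && target < start + len;
  }

  int main(void)
  {
          unsigned long thunk_start = 0x1000;  /* stands in for __indirect_thunk_start */
          unsigned long thunk_end   = 0x1400;  /* stands in for __indirect_thunk_end */

          /* Target inside the thunk section: reported as indirect (1). */
          printf("%d\n", target_in_range(0x1020, thunk_start, thunk_end - thunk_start));
          /* Target elsewhere: not indirect (0). */
          printf("%d\n", target_in_range(0x2000, thunk_start, thunk_end - thunk_start));
          return 0;
  }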

Signed-off-by: Zhenzhong Duan <zhenzhong.duan@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxx>
Cc: David Woodhouse <dwmw@xxxxxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
arch/x86/kernel/kprobes/opt.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 40b16b2..1136b29 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -203,6 +203,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
 	return len;
 }
 
+#ifndef CONFIG_RETPOLINE
 /* Check whether insn is indirect jump */
 static int __insn_is_indirect_jump(struct insn *insn)
 {
@@ -210,6 +211,7 @@ static int __insn_is_indirect_jump(struct insn *insn)
 		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
 		insn->opcode.bytes[0] == 0xea); /* Segment based jump */
 }
+#endif
 
 /* Check whether insn jumps into specified address range */
 static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
@@ -240,20 +242,16 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
 
 static int insn_is_indirect_jump(struct insn *insn)
 {
-	int ret = __insn_is_indirect_jump(insn);
+	int ret;
 
 #ifdef CONFIG_RETPOLINE
-	/*
-	 * Jump to x86_indirect_thunk_* is treated as an indirect jump.
-	 * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
-	 * older gcc may use indirect jump. So we add this check instead of
-	 * replace indirect-jump check.
-	 */
-	if (!ret)
+	/* Jump to x86_indirect_thunk_* is treated as an indirect jump. */
 	ret = insn_jump_into_range(insn,
 			(unsigned long)__indirect_thunk_start,
 			(unsigned long)__indirect_thunk_end -
 			(unsigned long)__indirect_thunk_start);
+#else
+	ret = __insn_is_indirect_jump(insn);
 #endif
 	return ret;
 }
--
1.8.3.1