[PATCH 1/2] x86/kprobes: Fix kprobes instruction boundary check with CONFIG_RETHUNK
From: Masami Hiramatsu (Google)
Date: Tue Sep 06 2022 - 20:55:36 EST
From: Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>
Since CONFIG_RETHUNK and CONFIG_SLS use INT3 for padding after a RET
instruction, kprobes always fails the probed-instruction boundary check
(which decodes the function body) if the probed address comes after such
padding. (Note that the compiler may place some conditional code blocks
after the RET instruction if it decides they are not on the hot path.)
This is because kprobes assumes that someone (e.g. kgdb) has put the INT3
there as a software breakpoint which replaced an original instruction.
But when the INT3 is just padding in the function, there is no original
instruction to recover.
To avoid this issue, when kprobes finds an INT3, it now looks up the
address of the next non-INT3 byte and searches for a branch that jumps
to that address. If such a branch exists, the INT3s are padding and can
be skipped.
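For illustration, here is a minimal userspace sketch of that check (not
kernel code): it uses a toy decoder and the hypothetical helpers
toy_insn_len()/toy_branch_target() that only understand the handful of
opcodes in its made-up example buffer, whereas the kernel implementation
below uses the real x86 instruction decoder and
recover_probed_instruction().

/*
 * Userspace sketch only (NOT kernel code): an INT3 run is accepted
 * only when some earlier branch targets the first byte after it.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define INT3 0xCC

/* Toy instruction length, sufficient for this example only. */
static size_t toy_insn_len(const uint8_t *p)
{
	switch (p[0]) {
	case 0x74: case 0xEB:	/* je rel8, jmp rel8 */
		return 2;
	default:		/* nop, ret, int3, ... */
		return 1;
	}
}

/* Branch target offset, or (size_t)-1 if not a branch we model. */
static size_t toy_branch_target(const uint8_t *buf, size_t off)
{
	if (buf[off] == 0x74 || buf[off] == 0xEB)
		return off + 2 + (int8_t)buf[off + 1];
	return (size_t)-1;
}

int main(void)
{
	/*
	 * Hypothetical function body:
	 *   0: je +5			; jumps to offset 7 (cold block)
	 *   2: nop
	 *   3: ret
	 *   4-6: int3 int3 int3	; SLS/RETHUNK padding
	 *   7: nop			; cold block, probing here must work
	 *   8: ret
	 */
	const uint8_t body[] = { 0x74, 0x05, 0x90, 0xC3,
				 INT3, INT3, INT3, 0x90, 0xC3 };
	size_t paddr = 7, off = 0;	/* probe at offset 7 */

	while (off < paddr) {
		if (body[off] == INT3) {
			size_t next = off, s;
			int padding = 0;

			/* Find the first non-INT3 byte after the run. */
			while (next < sizeof(body) && body[next] == INT3)
				next++;
			/* Padding only if an earlier branch targets 'next'. */
			for (s = 0; s < off; s += toy_insn_len(body + s))
				if (toy_branch_target(body, s) == next)
					padding = 1;
			if (!padding) {
				printf("unrecoverable INT3 at +%zu\n", off);
				return 1;
			}
			off = next;
			continue;
		}
		off += toy_insn_len(body + off);
	}
	printf("probe at +%zu is %s an instruction boundary\n", paddr,
	       off == paddr ? "on" : "NOT on");
	return 0;
}

The kernel's can_probe() below performs the same walk with struct insn,
for_each_insn() and skip_padding_int3().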
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>
Suggested-by: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Fixes: 15e67227c49a ("x86: Undo return-thunk damage")
Cc: stable@xxxxxxxxxxxxxxx
---
arch/x86/kernel/kprobes/common.h | 67 ++++++++++++++++++++++++++++++++++++++
arch/x86/kernel/kprobes/core.c | 57 ++++++++++++++++++--------------
arch/x86/kernel/kprobes/opt.c | 23 +------------
3 files changed, 100 insertions(+), 47 deletions(-)
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index c993521d4933..2adb36eaf366 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -92,6 +92,73 @@ extern int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn);
extern void synthesize_reljump(void *dest, void *from, void *to);
extern void synthesize_relcall(void *dest, void *from, void *to);
+/* Return the jump target address or 0 */
+static inline unsigned long insn_get_branch_addr(struct insn *insn)
+{
+ switch (insn->opcode.bytes[0]) {
+ case 0xe0: /* loopne */
+ case 0xe1: /* loope */
+ case 0xe2: /* loop */
+ case 0xe3: /* jcxz */
+ case 0xe9: /* near relative jump */
+ case 0xeb: /* short relative jump */
+ break;
+ case 0x0f:
+ if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
+ break;
+ return 0;
+ default:
+ if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
+ break;
+ return 0;
+ }
+ return (unsigned long)insn->next_byte + insn->immediate.value;
+}
+
+static inline void __decode_insn(struct insn *insn, kprobe_opcode_t *buf,
+ unsigned long addr)
+{
+ unsigned long recovered_insn;
+
+ /*
+ * Check if the instruction has been modified by another
+ * kprobe, in which case we replace the breakpoint by the
+ * original instruction in our buffer.
+ * Also, jump optimization will change the breakpoint to
+ * relative-jump. Since the relative-jump itself is
+ * normally used, we just go through if there is no kprobe.
+ */
+ recovered_insn = recover_probed_instruction(buf, addr);
+ if (!recovered_insn ||
+ insn_decode_kernel(insn, (void *)recovered_insn) < 0) {
+ insn->kaddr = NULL;
+ } else {
+ /* Recover address */
+ insn->kaddr = (void *)addr;
+ insn->next_byte = (void *)(addr + insn->length);
+ }
+}
+
+/* Iterate instructions in [saddr, eaddr), insn->next_byte is loop cursor. */
+#define for_each_insn(insn, saddr, eaddr, buf) \
+ for (__decode_insn(insn, buf, saddr); \
+ (insn)->kaddr && (unsigned long)(insn)->next_byte < eaddr; \
+ __decode_insn(insn, buf, (unsigned long)(insn)->next_byte))
+
+/* Return the next non-INT3 address, or 0 if the memory cannot be accessed */
+static inline unsigned long skip_padding_int3(unsigned long addr)
+{
+ unsigned char ops;
+
+ while (get_kernel_nofault(ops, (void *)addr) == 0) {
+ if (ops != INT3_INSN_OPCODE)
+ return addr;
+ addr++;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_OPTPROBES
extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter);
extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 4c3c27b6aea3..b20484cc0025 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -255,44 +255,49 @@ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long add
/* Check if paddr is at an instruction boundary */
static int can_probe(unsigned long paddr)
{
- unsigned long addr, __addr, offset = 0;
- struct insn insn;
kprobe_opcode_t buf[MAX_INSN_SIZE];
+ unsigned long addr, offset = 0;
+ struct insn insn;
if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
return 0;
- /* Decode instructions */
- addr = paddr - offset;
- while (addr < paddr) {
- int ret;
+ /* The first address must be an instruction boundary. */
+ if (!offset)
+ return 1;
+ /* Decode instructions */
+ for_each_insn(&insn, paddr - offset, paddr, buf) {
/*
- * Check if the instruction has been modified by another
- * kprobe, in which case we replace the breakpoint by the
- * original instruction in our buffer.
- * Also, jump optimization will change the breakpoint to
- * relative-jump. Since the relative-jump itself is
- * normally used, we just go through if there is no kprobe.
+ * CONFIG_RETHUNK or CONFIG_SLS or another debug feature
+ * may install INT3.
*/
- __addr = recover_probed_instruction(buf, addr);
- if (!__addr)
- return 0;
-
- ret = insn_decode_kernel(&insn, (void *)__addr);
- if (ret < 0)
- return 0;
+ if (insn.opcode.bytes[0] == INT3_INSN_OPCODE) {
+ /* Find the next non-INT3 instruction address */
+ addr = skip_padding_int3((unsigned long)insn.kaddr);
+ if (!addr)
+ return 0;
+ /*
+ * This can be a padding INT3 for CONFIG_RETHUNK or
+ * CONFIG_SLS. If a branch jumps to the address right
+ * after the INT3 sequence, these INT3s are just padding
+ * and we can continue decoding.
+ */
+ for_each_insn(&insn, paddr - offset, addr, buf) {
+ if (insn_get_branch_addr(&insn) == addr)
+ goto found;
+ }
- /*
- * Another debugging subsystem might insert this breakpoint.
- * In that case, we can't recover it.
- */
- if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
+ /* This INT3 cannot be decoded safely. */
return 0;
- addr += insn.length;
+found:
+ /* Set loop cursor */
+ insn.next_byte = (void *)addr;
+ continue;
+ }
}
- return (addr == paddr);
+ return ((unsigned long)insn.next_byte == paddr);
}
/* If x86 supports IBT (ENDBR) it must be skipped. */
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index e6b8c5362b94..2e41850cab06 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -235,28 +235,9 @@ static int __insn_is_indirect_jump(struct insn *insn)
/* Check whether insn jumps into specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
- unsigned long target = 0;
-
- switch (insn->opcode.bytes[0]) {
- case 0xe0: /* loopne */
- case 0xe1: /* loope */
- case 0xe2: /* loop */
- case 0xe3: /* jcxz */
- case 0xe9: /* near relative jump */
- case 0xeb: /* short relative jump */
- break;
- case 0x0f:
- if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
- break;
- return 0;
- default:
- if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
- break;
- return 0;
- }
- target = (unsigned long)insn->next_byte + insn->immediate.value;
+ unsigned long target = insn_get_branch_addr(insn);
- return (start <= target && target <= start + len);
+ return target ? (start <= target && target <= start + len) : 0;
}
static int insn_is_indirect_jump(struct insn *insn)