KPROBES_ON_FTRACE avoids much of the overhead of regular kprobes as it
eliminates the need for a trap, as well as the need to emulate or
single-step instructions.
Tested on the Berlin arm64 platform.
~ # mount -t debugfs debugfs /sys/kernel/debug/
~ # cd /sys/kernel/debug/
/sys/kernel/debug # echo 'p _do_fork' > tracing/kprobe_events
before the patch:
/sys/kernel/debug # cat kprobes/list
ffffff801009fe28 k _do_fork+0x0 [DISABLED]
after the patch:
/sys/kernel/debug # cat kprobes/list
ffffff801009ff54 k _do_fork+0x4 [DISABLED][FTRACE]
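For completeness, the probe can then be armed through the usual tracefs
interface (the event name below assumes the default p_<symbol>_<offset>
naming generated by kprobe_events; trace output not shown):
/sys/kernel/debug # echo 1 > tracing/events/kprobes/p__do_fork_0/enable
/sys/kernel/debug # cat tracing/trace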
Signed-off-by: Jisheng Zhang <Jisheng.Zhang@xxxxxxxxxxxxx>
---
This applies on top of the arm64 FTRACE_WITH_REGS series:
http://lists.infradead.org/pipermail/linux-arm-kernel/2019-August/674404.html
Changes since v3:
- move kprobe_lookup_name() and arch_kprobe_on_func_entry() to ftrace.c, since
  we only want to choose the ftrace entry for KPROBES_ON_FTRACE
- only choose the ftrace entry if (addr && !offset); see the sketch below
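An illustrative sketch of that lookup (assuming ftrace_location_range()
and AARCH64_INSN_SIZE; the real body lives in the ftrace.c hunk of the
full patch and is not reproduced here):

kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
	unsigned long addr = kallsyms_lookup_name(name);

	/* For a bare symbol, prefer the ftrace entry inside the function */
	if (addr && !offset) {
		unsigned long faddr;

		faddr = ftrace_location_range(addr,
					      addr + AARCH64_INSN_SIZE);
		if (faddr)
			addr = faddr;
	}
	return (kprobe_opcode_t *)addr;
}
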
Changes since v2:
- remove patch1, make it a single cleanup patch
- remove "This patch" in the change log
- implement arm64's kprobe_lookup_name() and arch_kprobe_on_func_entry() instead
  of patching the common kprobes code (entry check sketched below)
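Likewise, a minimal sketch of the entry check (illustrative only; it
treats a probe at offset 0 or at the ftrace entry at +4 as still being
on the function entry):

bool arch_kprobe_on_func_entry(unsigned long offset)
{
	return offset <= AARCH64_INSN_SIZE;
}
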
Changes since v1:
- split out "kprobes/x86: use instruction_pointer and instruction_pointer_set"
  as patch1
- add Masami's ACK to patch1
- add some description about KPROBES_ON_FTRACE and why we need it on
arm64
- correct the "before the patch" example log
- remove the consolidation patch, make it as TODO
- only adjust kprobe's addr when KPROBE_FLAG_FTRACE is set
- if KPROBES_ON_FTRACE, ftrace_call_adjust() the kprobe's addr before
calling ftrace_location()
- update the kprobes-on-ftrace/arch-support.txt in doc
.../debug/kprobes-on-ftrace/arch-support.txt | 2 +-
arch/arm64/Kconfig | 1 +
arch/arm64/kernel/probes/Makefile | 1 +
arch/arm64/kernel/probes/ftrace.c | 84 +++++++++++++++++++
4 files changed, 87 insertions(+), 1 deletion(-)
create mode 100644 arch/arm64/kernel/probes/ftrace.c
diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index 68f266944d5f..e8358a38981c 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -9,7 +9,7 @@
| alpha: | TODO |
| arc: | TODO |
| arm: | TODO |
- | arm64: | TODO |
+ | arm64: | ok |
| c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 663392d1eae2..928700f15e23 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -167,6 +167,7 @@ config ARM64
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
+ select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_GENERIC_VDSO
select IOMMU_DMA if IOMMU_SUPPORT
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
index 8e4be92e25b1..4020cfc66564 100644
--- a/arch/arm64/kernel/probes/Makefile
+++ b/arch/arm64/kernel/probes/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \
simulate-insn.o
obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o \
simulate-insn.o
+obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o
diff --git a/arch/arm64/kernel/probes/ftrace.c b/arch/arm64/kernel/probes/ftrace.c
new file mode 100644
index 000000000000..5989c57660f3
--- /dev/null
+++ b/arch/arm64/kernel/probes/ftrace.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Dynamic Ftrace based Kprobes Optimization
+ *
+ * Copyright (C) Hitachi Ltd., 2012
+ * Copyright (C) 2019 Jisheng Zhang <jszhang@xxxxxxxxxx>
+ * Synaptics Incorporated
+ */
+
+#include <linux/kprobes.h>
+
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct pt_regs *regs)
+{
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+
+ /* Preempt is disabled by ftrace */
+ p = get_kprobe((kprobe_opcode_t *)ip);
+ if (unlikely(!p) || kprobe_disabled(p))
+ return;
+
+ kcb = get_kprobe_ctlblk();
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(p);
+ } else {
+ unsigned long orig_ip = instruction_pointer(regs);
+ /* Kprobe handler expects regs->pc = ip + 4 as breakpoint hit */
+ instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));