Re: [PATCH v3 2/2] arm64: kprobe: Enable OPTPROBE for arm64

From: liuqi (BA)
Date: Wed Aug 11 2021 - 03:51:12 EST


Hi Masami,

On 2021/8/11 15:20, Masami Hiramatsu wrote:
Hi Qi,

Thanks for updating.

On Tue, 10 Aug 2021 13:53:30 +0800
Qi Liu <liuqi115@xxxxxxxxxx> wrote:


[...]

+void arch_optimize_kprobes(struct list_head *oplist)
+{
+	struct optimized_kprobe *op, *tmp;
+
+	list_for_each_entry_safe(op, tmp, oplist, list) {
+		u32 insn;
+
+		WARN_ON(kprobe_disabled(&op->kp));
+
+		/*
+		 * Backup instructions which will be replaced
+		 * by jump address
+		 */
+		memcpy(op->optinsn.copied_insn, op->kp.addr,
+		       RELATIVEJUMP_SIZE);
+		insn = aarch64_insn_gen_branch_imm((unsigned long)op->kp.addr,
+				(unsigned long)op->optinsn.insn,
+				AARCH64_INSN_BRANCH_NOLINK);
+
+		WARN_ON(insn == 0);
+
+		aarch64_insn_patch_text((void *)&(op->kp.addr), &insn, 1);

Can you also reduce the number of aarch64_insn_patch_text() calls here?
Since arch_optimize_kprobes() runs in workqueue context, you can
allocate memory. Thus, you can do something like this (not cleaned up):

#define OPTPROBE_BATCH_SIZE 64

void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	void **addrs;
	u32 *insns;
	int i = 0;

	addrs = kcalloc(OPTPROBE_BATCH_SIZE, sizeof(*addrs), GFP_KERNEL);
	insns = kcalloc(OPTPROBE_BATCH_SIZE, sizeof(*insns), GFP_KERNEL);

	list_for_each_entry_safe(op, tmp, oplist, list) {
		memcpy(op->optinsn.copied_insn, op->kp.addr,
		       RELATIVEJUMP_SIZE);
		addrs[i] = op->kp.addr;
		insns[i] = aarch64_insn_gen_branch_imm((unsigned long)op->kp.addr,
				(unsigned long)op->optinsn.insn,
				AARCH64_INSN_BRANCH_NOLINK);
		list_del_init(&op->list);
		if (++i == OPTPROBE_BATCH_SIZE)
			break;
	}
	aarch64_insn_patch_text(addrs, insns, i);

	kfree(addrs);
	kfree(insns);
}
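
(If more than OPTPROBE_BATCH_SIZE probes are pending, the entries left on
oplist after the early break are not lost: assuming the generic optimizer
behavior in kernel/kprobes.c, kprobe_optimizer() kicks itself again while
its work lists are non-empty, so the remainder is patched on a later pass.)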

Since the stop_machine() penalty is heavier than you think (especially
if the machine has many cores), it must be avoided as much as possible.
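
For context, each aarch64_insn_patch_text() call is one full stop_machine()
cycle. Paraphrasing the helper (from arch/arm64/kernel/patching.c in recent
kernels; treat the details as approximate):

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	/* One global synchronization of all online CPUs per call. */
	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}

So patching N probes one by one costs N stop_machine() rounds, while the
batched version above costs one round per OPTPROBE_BATCH_SIZE probes.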


Got it, I'll fix this in the next version.

+
+		list_del_init(&op->list);
+	}
+}
+
+void arch_unoptimize_kprobe(struct optimized_kprobe *op)
+{
+	arch_arm_kprobe(&op->kp);
+}
+
+/*
+ * Recover original instructions and breakpoints from relative jumps.
+ * Caller must call with locking kprobe_mutex.
+ */
+void arch_unoptimize_kprobes(struct list_head *oplist,
+			     struct list_head *done_list)
+{
+	struct optimized_kprobe *op, *tmp;
+
+	list_for_each_entry_safe(op, tmp, oplist, list) {
+		arch_unoptimize_kprobe(op);
+		list_move(&op->list, done_list);
+	}
+}

Ditto, please batch these updates into a single aarch64_insn_patch_text()
call as well. You don't need to use arch_arm_kprobe() in this case.

Thank you,

Thanks, will change this in the next version.
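
For reference, a batched unoptimize path along the same lines might look
like this (a sketch only, not part of the posted patch; it reuses the
OPTPROBE_BATCH_SIZE arrays from above and writes BRK64_OPCODE_KPROBES from
<asm/debug-monitors.h> directly, which is the breakpoint that
arch_arm_kprobe() would install):

void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;
	void **addrs;
	u32 *insns;
	int i = 0;

	addrs = kcalloc(OPTPROBE_BATCH_SIZE, sizeof(*addrs), GFP_KERNEL);
	insns = kcalloc(OPTPROBE_BATCH_SIZE, sizeof(*insns), GFP_KERNEL);

	list_for_each_entry_safe(op, tmp, oplist, list) {
		addrs[i] = (void *)op->kp.addr;
		/* Re-install the BRK instead of calling arch_arm_kprobe(). */
		insns[i] = BRK64_OPCODE_KPROBES;
		list_move(&op->list, done_list);
		if (++i == OPTPROBE_BATCH_SIZE)
			break;
	}
	aarch64_insn_patch_text(addrs, insns, i);

	kfree(addrs);
	kfree(insns);
}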

Qi
+
+void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
+{
+	if (op->optinsn.insn) {
+		free_optinsn_slot(op->optinsn.insn, 1);
+		op->optinsn.insn = NULL;
+	}
+}
diff --git a/arch/arm64/kernel/probes/optprobe_trampoline.S b/arch/arm64/kernel/probes/optprobe_trampoline.S
new file mode 100644
index 000000000000..24d713d400cd
--- /dev/null
+++ b/arch/arm64/kernel/probes/optprobe_trampoline.S
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * trampoline entry and return code for optprobes.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+
+	.global optprobe_template_entry
+optprobe_template_entry:
+	sub sp, sp, #PT_REGS_SIZE
+	save_all_base_regs
+	/* Get parameters to optimized_callback() */
+	ldr x0, 1f
+	mov x1, sp
+	/* Branch to optimized_callback() */
+	.global optprobe_template_call
+optprobe_template_call:
+	nop
+	restore_all_base_regs
+	ldr lr, [sp, #S_LR]
+	add sp, sp, #PT_REGS_SIZE
+	.global optprobe_template_restore_orig_insn
+optprobe_template_restore_orig_insn:
+	nop
+	.global optprobe_template_restore_end
+optprobe_template_restore_end:
+	nop
+	.global optprobe_template_end
+optprobe_template_end:
+	.global optprobe_template_val
+optprobe_template_val:
+	1: .long 0
+	.long 0
+	.global optprobe_template_max_length
+optprobe_template_max_length:
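
(The two .long 0 words at optprobe_template_val hold the 64-bit literal that
"ldr x0, 1f" loads, i.e. the struct optimized_kprobe pointer for this slot.
As an illustration of how the template is consumed, a hypothetical helper,
not part of this patch, could patch a copied-out slot like so; it assumes
extern kprobe_opcode_t optprobe_template_*[] declarations for the labels:

/* Offsets of the patchable slots, in kprobe_opcode_t (u32) units. */
#define TMPL_CALL_IDX	(optprobe_template_call - optprobe_template_entry)
#define TMPL_VAL_IDX	(optprobe_template_val - optprobe_template_entry)

static void fill_optinsn_template(kprobe_opcode_t *code,
				  struct optimized_kprobe *op)
{
	/* Store the op pointer in the literal read by "ldr x0, 1f". */
	*(unsigned long *)&code[TMPL_VAL_IDX] = (unsigned long)op;

	/* Turn the nop at optprobe_template_call into "bl optimized_callback". */
	code[TMPL_CALL_IDX] = aarch64_insn_gen_branch_imm(
			(unsigned long)&code[TMPL_CALL_IDX],
			(unsigned long)optimized_callback,
			AARCH64_INSN_BRANCH_LINK);
})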
--
2.17.1