Re: [PATCH v3 2/2] arm64: kprobe: Enable OPTPROBE for arm64

From: Masami Hiramatsu
Date: Wed Aug 11 2021 - 03:20:10 EST


Hi Qi,

Thanks for updating.

On Tue, 10 Aug 2021 13:53:30 +0800
Qi Liu <liuqi115@xxxxxxxxxx> wrote:

[...]
> +int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
> +                                  struct kprobe *orig)
> +{
> +        kprobe_opcode_t *code, *buf;
> +        void **addrs;
> +        u32 insn;
> +        int ret, i;
> +
> +        addrs = kcalloc(TMPL_MAX_LENGTH, sizeof(void *), GFP_KERNEL);
> +        if (!addrs)
> +                return -ENOMEM;
> +
> +        buf = kcalloc(TMPL_MAX_LENGTH, sizeof(kprobe_opcode_t), GFP_KERNEL);
> +        if (!buf) {
> +                kfree(addrs);
> +                return -ENOMEM;
> +        }
> +
> +        code = get_optinsn_slot();
> +        if (!code) {
> +                kfree(addrs);
> +                kfree(buf);
> +                return -ENOMEM;
> +        }
> +
> +        if (!is_offset_in_range((unsigned long)code,
> +                                (unsigned long)orig->addr + 8)) {
> +                ret = -ERANGE;
> +                goto error;
> +        }
> +
> +        if (!is_offset_in_range((unsigned long)code + TMPL_CALL_BACK,
> +                                (unsigned long)optimized_callback)) {
> +                ret = -ERANGE;
> +                goto error;
> +        }
> +
> +        if (!is_offset_in_range((unsigned long)&code[TMPL_RESTORE_END],
> +                                (unsigned long)op->kp.addr + 4)) {
> +                ret = -ERANGE;
> +                goto error;
> +        }
> +
> +        memcpy(buf, optprobe_template_entry,
> +               TMPL_END_IDX * sizeof(kprobe_opcode_t));
> +
> +        buf[TMPL_VAL_IDX] = FIELD_GET(GENMASK(31, 0), (unsigned long long)op);
> +        buf[TMPL_VAL_IDX + 1] =
> +                FIELD_GET(GENMASK(63, 32), (unsigned long long)op);
> +        buf[TMPL_RESTORE_ORIGN_INSN] = orig->opcode;
> +
> +        insn = aarch64_insn_gen_branch_imm(
> +                        (unsigned long)(&code[TMPL_CALL_BACK]),
> +                        (unsigned long)optimized_callback, AARCH64_INSN_BRANCH_LINK);
> +        buf[TMPL_CALL_BACK] = insn;
> +
> +        insn = aarch64_insn_gen_branch_imm(
> +                        (unsigned long)(&code[TMPL_RESTORE_END]),
> +                        (unsigned long)(op->kp.addr) + 4, AARCH64_INSN_BRANCH_NOLINK);
> +        buf[TMPL_RESTORE_END] = insn;
> +
> +        /* Setup template */
> +        for (i = 0; i < TMPL_MAX_LENGTH; i++)
> +                addrs[i] = code + i;
> +
> +        ret = aarch64_insn_patch_text(addrs, buf, TMPL_MAX_LENGTH);
> +        if (ret < 0)
> +                goto error;

OK, this part looks good to me.
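
(A side note for anyone reading this without patch 1/2 at hand: is_offset_in_range()
is introduced there, and I assume it simply checks that the displacement fits in a
B/BL immediate, i.e. within +/-128MB, roughly like the sketch below. This is my
guess from the call sites, not a quote of the patch.)

static bool is_offset_in_range(unsigned long start, unsigned long end)
{
        long offset = (long)(end - start);

        /* arm64 B/BL can reach +/-128MB from the branch instruction. */
        return offset >= -SZ_128M && offset < SZ_128M;
}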

> +
> +        flush_icache_range((unsigned long)code,
> +                           (unsigned long)(&code[TMPL_END_IDX]));
> +
> +        /* Set op->optinsn.insn means prepared. */
> +        op->optinsn.insn = code;
> +
> +out:
> +        kfree(addrs);
> +        kfree(buf);
> +        return ret;
> +
> +error:
> +        free_optinsn_slot(code, 0);
> +        goto out;
> +}
> +
> +void arch_optimize_kprobes(struct list_head *oplist)
> +{
> +        struct optimized_kprobe *op, *tmp;
> +
> +        list_for_each_entry_safe(op, tmp, oplist, list) {
> +                u32 insn;
> +
> +                WARN_ON(kprobe_disabled(&op->kp));
> +
> +                /*
> +                 * Backup instructions which will be replaced
> +                 * by jump address
> +                 */
> +                memcpy(op->optinsn.copied_insn, op->kp.addr,
> +                       RELATIVEJUMP_SIZE);
> +                insn = aarch64_insn_gen_branch_imm((unsigned long)op->kp.addr,
> +                                (unsigned long)op->optinsn.insn,
> +                                AARCH64_INSN_BRANCH_NOLINK);
> +
> +                WARN_ON(insn == 0);
> +
> +                aarch64_insn_patch_text((void *)&(op->kp.addr), &insn, 1);

Can you also reduce the number of aarch64_insn_patch_text() calls here?
Since arch_optimize_kprobes() runs in workqueue context, you can
allocate memory. Thus, you can do something like this (not cleaned up):

#define OPTPROBE_BATCH_SIZE 64

void arch_optimize_kprobes(struct list_head *oplist)
{
        struct optimized_kprobe *op, *tmp;
        void **addrs;
        u32 *insns;
        int i = 0;

        addrs = kcalloc(OPTPROBE_BATCH_SIZE, sizeof(*addrs), GFP_KERNEL);
        insns = kcalloc(OPTPROBE_BATCH_SIZE, sizeof(*insns), GFP_KERNEL);

        list_for_each_entry_safe(op, tmp, oplist, list) {
                memcpy(op->optinsn.copied_insn, op->kp.addr,
                       RELATIVEJUMP_SIZE);
                addrs[i] = op->kp.addr;
                insns[i] = aarch64_insn_gen_branch_imm((unsigned long)op->kp.addr,
                                (unsigned long)op->optinsn.insn,
                                AARCH64_INSN_BRANCH_NOLINK);
                list_del_init(&op->list);
                if (++i == OPTPROBE_BATCH_SIZE)
                        break;
        }
        aarch64_insn_patch_text(addrs, insns, i);

        kfree(addrs);
        kfree(insns);
}

Since the stop_machine() penalty is heavier than you might think (especially
if the machine has many cores), it must be avoided as much as possible.
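
To make the cost concrete: every aarch64_insn_patch_text() call funnels through
stop_machine(), which stops every online CPU until the patching callback has
finished. Roughly (simplified from memory, not the exact kernel code):

int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
        struct aarch64_insn_patch patch = {
                .text_addrs = addrs,
                .new_insns = insns,
                .insn_cnt = cnt,
        };

        /* One global CPU rendezvous per call, however many insns it patches. */
        return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
                                       cpu_online_mask);
}

So optimizing N probes one call at a time costs N stop_machine() rounds, while
the batched version above pays for only one.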


> +
> +                list_del_init(&op->list);
> +        }
> +}
> +
> +void arch_unoptimize_kprobe(struct optimized_kprobe *op)
> +{
> +        arch_arm_kprobe(&op->kp);
> +}
> +
> +/*
> + * Recover original instructions and breakpoints from relative jumps.
> + * Caller must call with locking kprobe_mutex.
> + */
> +void arch_unoptimize_kprobes(struct list_head *oplist,
> +                             struct list_head *done_list)
> +{
> +        struct optimized_kprobe *op, *tmp;
> +
> +        list_for_each_entry_safe(op, tmp, oplist, list) {
> +                arch_unoptimize_kprobe(op);
> +                list_move(&op->list, done_list);
> +        }
> +}

Ditto: please batch this path as well. You don't need to go through
arch_arm_kprobe() one probe at a time in this case.
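
For example (again an uncleaned, untested sketch, just to show the shape), the
unoptimize path could write the BRK back in one batch instead of calling
arch_arm_kprobe() per probe:

void arch_unoptimize_kprobes(struct list_head *oplist,
                             struct list_head *done_list)
{
        struct optimized_kprobe *op, *tmp;
        void **addrs;
        u32 *insns;
        int i = 0;

        addrs = kcalloc(OPTPROBE_BATCH_SIZE, sizeof(*addrs), GFP_KERNEL);
        insns = kcalloc(OPTPROBE_BATCH_SIZE, sizeof(*insns), GFP_KERNEL);

        list_for_each_entry_safe(op, tmp, oplist, list) {
                addrs[i] = op->kp.addr;
                insns[i] = BRK64_OPCODE_KPROBES; /* what arch_arm_kprobe() writes */
                list_move(&op->list, done_list);
                if (++i == OPTPROBE_BATCH_SIZE)
                        break;
        }
        aarch64_insn_patch_text(addrs, insns, i);

        kfree(addrs);
        kfree(insns);
}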

Thank you,

> +
> +void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
> +{
> +        if (op->optinsn.insn) {
> +                free_optinsn_slot(op->optinsn.insn, 1);
> +                op->optinsn.insn = NULL;
> +        }
> +}
> diff --git a/arch/arm64/kernel/probes/optprobe_trampoline.S b/arch/arm64/kernel/probes/optprobe_trampoline.S
> new file mode 100644
> index 000000000000..24d713d400cd
> --- /dev/null
> +++ b/arch/arm64/kernel/probes/optprobe_trampoline.S
> @@ -0,0 +1,37 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * trampoline entry and return code for optprobes.
> + */
> +
> +#include <linux/linkage.h>
> +#include <asm/asm-offsets.h>
> +#include <asm/assembler.h>
> +
> +        .global optprobe_template_entry
> +optprobe_template_entry:
> +        sub sp, sp, #PT_REGS_SIZE
> +        save_all_base_regs
> +        /* Get parameters to optimized_callback() */
> +        ldr x0, 1f
> +        mov x1, sp
> +        /* Branch to optimized_callback() */
> +        .global optprobe_template_call
> +optprobe_template_call:
> +        nop
> +        restore_all_base_regs
> +        ldr lr, [sp, #S_LR]
> +        add sp, sp, #PT_REGS_SIZE
> +        .global optprobe_template_restore_orig_insn
> +optprobe_template_restore_orig_insn:
> +        nop
> +        .global optprobe_template_restore_end
> +optprobe_template_restore_end:
> +        nop
> +        .global optprobe_template_end
> +optprobe_template_end:
> +        .global optprobe_template_val
> +optprobe_template_val:
> +        1: .long 0
> +        .long 0
> +        .global optprobe_template_max_length
> +optprobe_template_max_length:
> --
> 2.17.1
>


--
Masami Hiramatsu <mhiramat@xxxxxxxxxx>