Re: [PATCH v6 08/17] static_call: Avoid kprobes on inline static_call()s

From: Steven Rostedt
Date: Fri Jul 10 2020 - 18:00:19 EST


On Fri, 10 Jul 2020 15:38:39 +0200
Peter Zijlstra <peterz@xxxxxxxxxxxxx> wrote:

> Similar to how we disallow kprobes on any other dynamic text
> (ftrace/jump_label) also disallow kprobes on inline static_call()s.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>

I'd like to have Masami review this patch too.

> ---
>  arch/x86/kernel/kprobes/opt.c |  4 +-
>  include/linux/static_call.h   | 11 +++++++
>  kernel/kprobes.c              |  2 +
>  kernel/static_call.c          | 64 ++++++++++++++++++++++++++++++++++++++++++
>  4 files changed, 80 insertions(+), 1 deletion(-)
>
> --- a/arch/x86/kernel/kprobes/opt.c
> +++ b/arch/x86/kernel/kprobes/opt.c
> @@ -17,6 +17,7 @@
> #include <linux/ftrace.h>
> #include <linux/frame.h>
> #include <linux/pgtable.h>
> +#include <linux/static_call.h>
>
> #include <asm/text-patching.h>
> #include <asm/cacheflush.h>
> @@ -209,7 +210,8 @@ static int copy_optimized_instructions(u
> 	/* Check whether the address range is reserved */
> 	if (ftrace_text_reserved(src, src + len - 1) ||
> 	    alternatives_text_reserved(src, src + len - 1) ||
> -	    jump_label_text_reserved(src, src + len - 1))
> +	    jump_label_text_reserved(src, src + len - 1) ||
> +	    static_call_text_reserved(src, src + len - 1))
> 		return -EBUSY;
>
> 	return len;
> --- a/include/linux/static_call.h
> +++ b/include/linux/static_call.h
> @@ -110,6 +110,7 @@ struct static_call_key {
>
> extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
> extern int static_call_mod_init(struct module *mod);
> +extern int static_call_text_reserved(void *start, void *end);
>
> #define DEFINE_STATIC_CALL(name, _func)					\
> 	DECLARE_STATIC_CALL(name, _func);				\
> @@ -153,6 +154,11 @@ void __static_call_update(struct static_
> 	cpus_read_unlock();
> }
>
> +static inline int static_call_text_reserved(void *start, void *end)
> +{
> +	return 0;
> +}
> +
> #define EXPORT_STATIC_CALL(name)					\
> 	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
> 	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
> @@ -182,6 +188,11 @@ void __static_call_update(struct static_
> 	WRITE_ONCE(key->func, func);
> }
>
> +static inline int static_call_text_reserved(void *start, void *end)
> +{
> +	return 0;
> +}
> +
> #define EXPORT_STATIC_CALL(name) EXPORT_SYMBOL(STATIC_CALL_KEY(name))
> #define EXPORT_STATIC_CALL_GPL(name) EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))
>
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -35,6 +35,7 @@
> #include <linux/ftrace.h>
> #include <linux/cpu.h>
> #include <linux/jump_label.h>
> +#include <linux/static_call.h>
>
> #include <asm/sections.h>
> #include <asm/cacheflush.h>
> @@ -1591,6 +1592,7 @@ static int check_kprobe_address_safe(str
> 	if (!kernel_text_address((unsigned long) p->addr) ||
> 	    within_kprobe_blacklist((unsigned long) p->addr) ||
> 	    jump_label_text_reserved(p->addr, p->addr) ||
> +	    static_call_text_reserved(p->addr, p->addr) ||
> 	    find_bug((unsigned long)p->addr)) {
> 		ret = -EINVAL;
> 		goto out;
> --- a/kernel/static_call.c
> +++ b/kernel/static_call.c
> @@ -204,8 +204,58 @@ static int __static_call_init(struct mod
> 	return 0;
> }
>
> +static int addr_conflict(struct static_call_site *site, void *start, void *end)
> +{
> +	unsigned long addr = (unsigned long)static_call_addr(site);
> +
> +	if (addr <= (unsigned long)end &&
> +	    addr + CALL_INSN_SIZE > (unsigned long)start)
> +		return 1;
> +
> +	return 0;
> +}
> +
> +static int __static_call_text_reserved(struct static_call_site *iter_start,
> +				       struct static_call_site *iter_stop,
> +				       void *start, void *end)
> +{
> +	struct static_call_site *iter = iter_start;
> +
> +	while (iter < iter_stop) {
> +		if (addr_conflict(iter, start, end))
> +			return 1;
> +		iter++;
> +	}
> +
> +	return 0;
> +}
> +
> #ifdef CONFIG_MODULES
>
> +static int __static_call_mod_text_reserved(void *start, void *end)
> +{
> +	struct module *mod;
> +	int ret;
> +
> +	preempt_disable();
> +	mod = __module_text_address((unsigned long)start);
> +	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
> +	if (!try_module_get(mod))
> +		mod = NULL;
> +	preempt_enable();
> +
> +	if (!mod)
> +		return 0;
> +
> +	ret = __static_call_text_reserved(mod->static_call_sites,
> +			mod->static_call_sites + mod->num_static_call_sites,
> +			start, end);
> +
> +	module_put(mod);
> +
> +	return ret;
> +}
> +
> static int static_call_add_module(struct module *mod)
> {
> 	return __static_call_init(mod, mod->static_call_sites,
> @@ -275,6 +325,20 @@ static struct notifier_block static_call
>
> #endif /* CONFIG_MODULES */
>
> +int static_call_text_reserved(void *start, void *end)
> +{
> +	int ret = __static_call_text_reserved(__start_static_call_sites,
> +			__stop_static_call_sites, start, end);
> +
> +	if (ret)
> +		return ret;
> +
> +#ifdef CONFIG_MODULES
> +	ret = __static_call_mod_text_reserved(start, end);
> +#endif

Nit, but why not have a #else /* !CONFIG_MODULES */ above and just:

static inline int __static_call_mod_text_reserved(void *start, void *end)
{
	return 0;
}
#endif

?

Then you don't need the extra #ifdef above, and just end it with:

	return __static_call_mod_text_reserved(start, end);
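
For illustration, static_call_text_reserved() would then end up looking
like this (just a sketch, assuming the !CONFIG_MODULES stub above):

int static_call_text_reserved(void *start, void *end)
{
	int ret = __static_call_text_reserved(__start_static_call_sites,
			__stop_static_call_sites, start, end);

	if (ret)
		return ret;

	/* Compiles to "return 0;" when !CONFIG_MODULES */
	return __static_call_mod_text_reserved(start, end);
}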

-- Steve


> +	return ret;
> +}
> +
> static void __init static_call_init(void)
> {
> 	int ret;
>