[patch 25/38] x86/modules: Add call thunk patching

From: Thomas Gleixner
Date: Sat Jul 16 2022 - 19:19:12 EST


As with the builtins, create call thunks and patch the call sites to call
the thunk on Intel SKL CPUs for retbleed mitigation.
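
For illustration only (this is not code from the patch): on x86-64 a
patched call site is a 5-byte near call, so retargeting it to a thunk
boils down to rewriting the rel32 displacement relative to the end of
the instruction. Below is a minimal user-space sketch of that
computation with made-up names; the kernel does the equivalent with its
text poking machinery while holding text_mutex.

#include <stdint.h>
#include <string.h>

/* Sketch only: point the "call rel32" at @addr to @thunk instead of
 * its original destination. The displacement is taken relative to the
 * end of the 5-byte instruction.
 */
static void retarget_call(uint8_t *addr, const uint8_t *thunk)
{
	int32_t disp = (int32_t)(thunk - (addr + 5));

	addr[0] = 0xe8;				/* opcode: call rel32 */
	memcpy(&addr[1], &disp, sizeof(disp));	/* new displacement   */
}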

Note that module init functions are ignored for the sake of simplicity
because loading modules is not something which is done in high-frequency
loops, and the attacker does not really have a handle on when this happens
in order to launch a matching attack. The depth tracking will still work
for calls into the builtins, and because the call is not accounted the
counter will underflow faster and overstuff, but that's mitigated by the
saturating counter and the side effect is only temporary.
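
To make the underflow/overstuff remark concrete, here is a purely
illustrative user-space sketch of a saturating depth counter, with
made-up names and limits (the real implementation keeps a per-CPU value
and stuffs the RSB based on it; none of the details below are taken
from the kernel code):

#include <stdio.h>

#define MAX_DEPTH	12	/* made-up limit */

static int depth;

static void account_call(void)
{
	if (depth < MAX_DEPTH)	/* saturate at the top */
		depth++;
}

static void account_return(void)
{
	if (depth > 0)		/* saturate at zero, never wrap around */
		depth--;
}

int main(void)
{
	int i;

	/* Calls from unpatched sites (e.g. module init code) are not
	 * accounted, so returns temporarily outnumber calls ...
	 */
	for (i = 0; i < 3; i++)
		account_call();
	for (i = 0; i < 5; i++)
		account_return();

	/* ... but the counter saturates at zero instead of wrapping,
	 * so the effect of the skew is bounded and temporary.
	 */
	printf("depth: %d\n", depth);
	return 0;
}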

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
 arch/x86/include/asm/alternative.h |    7 +++++
 arch/x86/kernel/callthunks.c       |   49 +++++++++++++++++++++++++++++++++++++
 arch/x86/kernel/module.c           |   29 +++++++++++++++++++++-
 include/linux/module.h             |    4 +++
 4 files changed, 88 insertions(+), 1 deletion(-)

--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -90,8 +90,15 @@ struct callthunk_sites {

#ifdef CONFIG_CALL_THUNKS
extern void callthunks_patch_builtin_calls(void);
+extern void callthunks_patch_module_calls(struct callthunk_sites *sites,
+					   struct module *mod);
+extern void callthunks_module_free(struct module *mod);
#else
static __always_inline void callthunks_patch_builtin_calls(void) {}
+static __always_inline void
+callthunks_patch_module_calls(struct callthunk_sites *sites,
+			      struct module *mod) {}
+static __always_inline void callthunks_module_free(struct module *mod) { }
#endif

#ifdef CONFIG_SMP
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -329,6 +329,20 @@ static __init_or_module void callthunk_a
area->tmem->is_rx = true;
}

+static __init_or_module int callthunk_set_modname(struct module_layout *layout)
+{
+#ifdef CONFIG_MODULES
+	struct module *mod = layout->mtn.mod;
+
+	if (mod) {
+		mod->callthunk_name = kasprintf(GFP_KERNEL, "callthunk:%s", mod->name);
+		if (!mod->callthunk_name)
+			return -ENOMEM;
+	}
+#endif
+	return 0;
+}
+
static __init_or_module int callthunks_setup(struct callthunk_sites *cs,
struct module_layout *layout)
{
@@ -404,6 +418,10 @@ static __init_or_module int callthunks_s
callthunk_area_set_rx(area);
sync_core();

+	ret = callthunk_set_modname(layout);
+	if (ret)
+		goto fail;
+
layout->base = thunk;
layout->size = text_size;
layout->text_size = text_size;
@@ -457,3 +475,34 @@ void __init callthunks_patch_builtin_cal
callthunks_init(&cs);
mutex_unlock(&text_mutex);
}
+
+#ifdef CONFIG_MODULES
+void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
+					     struct module *mod)
+{
+	struct module_layout *layout = &mod->thunk_layout;
+
+	if (!thunks_initialized)
+		return;
+
+	layout->mtn.mod = mod;
+	mutex_lock(&text_mutex);
+	WARN_ON_ONCE(callthunks_setup(cs, layout));
+	mutex_unlock(&text_mutex);
+}
+
+void callthunks_module_free(struct module *mod)
+{
+	struct module_layout *layout = &mod->thunk_layout;
+	struct thunk_mem_area *area = layout->arch_data;
+
+	if (!thunks_initialized || !area)
+		return;
+
+	prdbg("Free %s\n", layout_getname(layout));
+	layout->arch_data = NULL;
+	mutex_lock(&text_mutex);
+	callthunk_free(area, true);
+	mutex_unlock(&text_mutex);
+}
+#endif /* CONFIG_MODULES */
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -196,7 +196,8 @@ int module_finalize(const Elf_Ehdr *hdr,
{
const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
*para = NULL, *orc = NULL, *orc_ip = NULL,
-		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL;
+		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL,
+		*syms = NULL, *calls = NULL;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
@@ -216,6 +217,10 @@ int module_finalize(const Elf_Ehdr *hdr,
retpolines = s;
if (!strcmp(".return_sites", secstrings + s->sh_name))
returns = s;
+ if (!strcmp(".sym_sites", secstrings + s->sh_name))
+ syms = s;
+ if (!strcmp(".call_sites", secstrings + s->sh_name))
+ calls = s;
if (!strcmp(".ibt_endbr_seal", secstrings + s->sh_name))
ibt_endbr = s;
}
@@ -241,10 +246,31 @@ int module_finalize(const Elf_Ehdr *hdr,
void *aseg = (void *)alt->sh_addr;
apply_alternatives(aseg, aseg + alt->sh_size);
}
+	if (calls || syms || para) {
+		struct callthunk_sites cs = {};
+
+		if (syms) {
+			cs.syms_start = (void *)syms->sh_addr;
+			cs.syms_end = (void *)syms->sh_addr + syms->sh_size;
+		}
+
+		if (calls) {
+			cs.call_start = (void *)calls->sh_addr;
+			cs.call_end = (void *)calls->sh_addr + calls->sh_size;
+		}
+
+		if (para) {
+			cs.pv_start = (void *)para->sh_addr;
+			cs.pv_end = (void *)para->sh_addr + para->sh_size;
+		}
+
+		callthunks_patch_module_calls(&cs, me);
+	}
if (ibt_endbr) {
void *iseg = (void *)ibt_endbr->sh_addr;
apply_ibt_endbr(iseg, iseg + ibt_endbr->sh_size);
}
+
if (locks && text) {
void *lseg = (void *)locks->sh_addr;
void *tseg = (void *)text->sh_addr;
@@ -266,4 +292,5 @@ int module_finalize(const Elf_Ehdr *hdr,
void module_arch_cleanup(struct module *mod)
{
alternatives_smp_module_del(mod);
+	callthunks_module_free(mod);
}
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -525,6 +525,10 @@ struct module {
struct pi_entry **printk_index_start;
#endif

+#ifdef CONFIG_CALL_THUNKS
+	char *callthunk_name;
+#endif
+
#ifdef CONFIG_MODULE_UNLOAD
/* What modules depend on me? */
struct list_head source_list;