[PATCH 1/2] scs: switch to vmapped shadow stacks

From: Sami Tolvanen
Date: Thu Oct 22 2020 - 16:24:10 EST


The kernel currently uses kmem_cache to allocate shadow call stacks,
which means an overflow may not be detected immediately and can
potentially result in another task's shadow stack being overwritten.

This change switches SCS to use virtually mapped shadow stacks, which
increases the shadow stack size to a full page and provides more
robust overflow detection, similar to VMAP_STACK.

Signed-off-by: Sami Tolvanen <samitolvanen@xxxxxxxxxx>
---
 include/linux/scs.h |  7 +----
 kernel/scs.c        | 63 ++++++++++++++++++++++++++++++++++++++-------
 2 files changed, 55 insertions(+), 15 deletions(-)
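
Reviewer note, not part of the patch: the overflow detection comes
from vmalloc's guard holes; each vmapped stack is followed by an
unmapped region, so running off the end faults immediately instead of
silently corrupting an adjacent allocation, as a kmem_cache overflow
could. Below is a minimal user-space sketch of the same guard-page
principle; the mmap()/mprotect() pair merely stands in for vmalloc's
guard page and is illustrative only:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* One usable page followed by an inaccessible guard page. */
	char *stack = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED)
		return 1;
	if (mprotect(stack + page, page, PROT_NONE))
		return 1;

	memset(stack, 0, page);		/* fine: stays within the stack */
	puts("writing one byte past the end now faults...");
	stack[page] = 0;		/* SIGSEGV, like hitting a guard page */
	return 0;
}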

diff --git a/include/linux/scs.h b/include/linux/scs.h
index 6dec390cf154..86e3c4b7b714 100644
--- a/include/linux/scs.h
+++ b/include/linux/scs.h
@@ -15,12 +15,7 @@
 
 #ifdef CONFIG_SHADOW_CALL_STACK
 
-/*
- * In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit
- * architecture) provided ~40% safety margin on stack usage while keeping
- * memory allocation overhead reasonable.
- */
-#define SCS_SIZE		SZ_1K
+#define SCS_SIZE		PAGE_SIZE
 #define GFP_SCS			(GFP_KERNEL | __GFP_ZERO)
 
 /* An illegal pointer value to mark the end of the shadow stack. */
diff --git a/kernel/scs.c b/kernel/scs.c
index 4ff4a7ba0094..2136edba548d 100644
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -5,50 +5,95 @@
  * Copyright (C) 2019 Google LLC
  */
 
+#include <linux/cpuhotplug.h>
 #include <linux/kasan.h>
 #include <linux/mm.h>
 #include <linux/scs.h>
-#include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/vmstat.h>
 
-static struct kmem_cache *scs_cache;
-
 static void __scs_account(void *s, int account)
 {
-	struct page *scs_page = virt_to_page(s);
+	struct page *scs_page = vmalloc_to_page(s);
 
 	mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB,
 			    account * (SCS_SIZE / SZ_1K));
 }
 
+/* Matches NR_CACHED_STACKS for VMAP_STACK */
+#define NR_CACHED_SCS 2
+static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);
+
 static void *scs_alloc(int node)
 {
-	void *s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
+	int i;
+	void *s;
+
+	for (i = 0; i < NR_CACHED_SCS; i++) {
+		s = this_cpu_xchg(scs_cache[i], NULL);
+		if (s) {
+			memset(s, 0, SCS_SIZE);
+			goto out;
+		}
+	}
+
+	/*
+	 * We allocate a full page for the shadow stack, which should be
+	 * more than we need. Check the assumption nevertheless.
+	 */
+	BUILD_BUG_ON(SCS_SIZE > PAGE_SIZE);
+
+	s = __vmalloc_node_range(PAGE_SIZE, SCS_SIZE,
+				 VMALLOC_START, VMALLOC_END,
+				 GFP_SCS, PAGE_KERNEL, 0,
+				 node, __builtin_return_address(0));
 
 	if (!s)
 		return NULL;
 
+out:
 	*__scs_magic(s) = SCS_END_MAGIC;
 
 	/*
 	 * Poison the allocation to catch unintentional accesses to
	 * the shadow stack when KASAN is enabled.
 	 */
-	kasan_poison_object_data(scs_cache, s);
+	kasan_poison_vmalloc(s, SCS_SIZE);
 	__scs_account(s, 1);
 	return s;
 }
 
 static void scs_free(void *s)
 {
+	int i;
+
 	__scs_account(s, -1);
-	kasan_unpoison_object_data(scs_cache, s);
-	kmem_cache_free(scs_cache, s);
+	kasan_unpoison_vmalloc(s, SCS_SIZE);
+
+	for (i = 0; i < NR_CACHED_SCS; i++)
+		if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL)
+			return;
+
+	vfree_atomic(s);
+}
+
+static int scs_cleanup(unsigned int cpu)
+{
+	int i;
+	void **cache = per_cpu_ptr(scs_cache, cpu);
+
+	for (i = 0; i < NR_CACHED_SCS; i++) {
+		vfree(cache[i]);
+		cache[i] = NULL;
+	}
+
+	return 0;
+}
 
 void __init scs_init(void)
 {
-	scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
+	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "scs:scs_cache", NULL,
+			  scs_cleanup);
 }
 
 int scs_prepare(struct task_struct *tsk, int node)
--
2.29.0.rc1.297.gfa9743e501-goog
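
Illustrative aside, not part of the patch: scs_cache[] above is a
small lock-free per-CPU free list. Allocation pops a cached stack with
this_cpu_xchg(), freeing parks the stack in an empty slot with
this_cpu_cmpxchg(), and only when both slots are full does the stack
go back to vmalloc via vfree_atomic(). A rough user-space analogue
using C11 atomics; all names here are invented for the sketch, and the
kernel primitives are additionally preemption-safe:

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

#define NR_CACHED 2
static _Atomic(void *) cache[NR_CACHED];

static void *cached_alloc(size_t size)
{
	for (int i = 0; i < NR_CACHED; i++) {
		void *s = atomic_exchange(&cache[i], NULL);
		if (s) {
			memset(s, 0, size);	/* reused stacks start zeroed */
			return s;
		}
	}
	return calloc(1, size);			/* slow path: new allocation */
}

static void cached_free(void *s)
{
	for (int i = 0; i < NR_CACHED; i++) {
		void *expected = NULL;
		/* park the stack in an empty slot, if any */
		if (atomic_compare_exchange_strong(&cache[i], &expected, s))
			return;
	}
	free(s);				/* cache full: really free */
}

int main(void)
{
	void *a = cached_alloc(4096);
	cached_free(a);				/* parked in cache[0] */
	void *b = cached_alloc(4096);		/* comes back from the cache */
	cached_free(b);
	return a == b ? 0 : 1;			/* 0: reuse worked */
}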