[RFC PATCH v1 23/28] riscv signal: Save and restore of shadow stack for signal
From: debug
Date: Thu Jan 25 2024 - 01:36:50 EST
From: Deepak Gupta <debug@xxxxxxxxxxxx>
Save the shadow stack pointer in the sigcontext structure while delivering a signal.
Restore the shadow stack pointer from sigcontext on sigreturn.
Signed-off-by: Deepak Gupta <debug@xxxxxxxxxxxx>
---
arch/riscv/include/asm/usercfi.h | 18 ++++++++++++
arch/riscv/kernel/signal.c | 45 ++++++++++++++++++++++++++++++
arch/riscv/kernel/usercfi.c | 47 ++++++++++++++++++++++++++++++++
3 files changed, 110 insertions(+)
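Note for readers of this patch in isolation: the sc_cfi_state.ss_ptr field used
below is added to the riscv uapi sigcontext by an earlier patch in this series.
The sketch that follows is illustrative only; the struct name and the
token_is_valid() helper are made up for the sketch, but the invariant it checks
is the same one restore_user_shstk() enforces below: the restore token stored at
ss_ptr must contain ss_ptr + SHSTK_ENTRY_SIZE, i.e. the shadow stack pointer
value from before the token was pushed.

/* Standalone userspace sketch of the restore-token invariant (not kernel code). */
#include <assert.h>

/* Illustrative stand-in for the uapi sigcontext extension; real name/layout may differ. */
struct sc_cfi_state_sketch {
	unsigned long ss_ptr;	/* address of the restore token on the shadow stack */
};

#define SHSTK_ENTRY_SIZE sizeof(unsigned long)

/* Hypothetical helper mirroring the (token - shstk_ptr) == SHSTK_ENTRY_SIZE check. */
static int token_is_valid(unsigned long token, unsigned long token_addr)
{
	return (token - token_addr) == SHSTK_ENTRY_SIZE;
}

int main(void)
{
	unsigned long fake_shstk[2] = { 0, 0 };
	struct sc_cfi_state_sketch sc = {
		.ss_ptr = (unsigned long)&fake_shstk[0],	/* what setup_rt_frame() would record */
	};

	/* Emulate create_rstor_token(): store the pre-push SSP (addr + entry size) at addr. */
	fake_shstk[0] = sc.ss_ptr + SHSTK_ENTRY_SIZE;

	/* This is the relation rt_sigreturn -> restore_user_shstk() verifies. */
	assert(token_is_valid(fake_shstk[0], sc.ss_ptr));
	return 0;
}
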
diff --git a/arch/riscv/include/asm/usercfi.h b/arch/riscv/include/asm/usercfi.h
index 4bd10dcd48aa..28c67866ff6f 100644
--- a/arch/riscv/include/asm/usercfi.h
+++ b/arch/riscv/include/asm/usercfi.h
@@ -33,6 +33,9 @@ bool is_shstk_enabled(struct task_struct *task);
bool is_shstk_locked(struct task_struct *task);
bool is_indir_lp_enabled(struct task_struct *task);
bool is_indir_lp_locked(struct task_struct *task);
+unsigned long get_active_shstk(struct task_struct *task);
+int restore_user_shstk(struct task_struct *tsk, unsigned long shstk_ptr);
+int save_user_shstk(struct task_struct *tsk, unsigned long *saved_shstk_ptr);
#define PR_SHADOW_STACK_SUPPORTED_STATUS_MASK (PR_SHADOW_STACK_ENABLE)
@@ -70,6 +73,16 @@ static inline bool is_shstk_locked(struct task_struct *task)
return false;
}
+static inline int restore_user_shstk(struct task_struct *tsk, unsigned long shstk_ptr)
+{
+ return -EINVAL;
+}
+
+static inline int save_user_shstk(struct task_struct *tsk, unsigned long *saved_shstk_ptr)
+{
+ return -EINVAL;
+}
+
static inline bool is_indir_lp_enabled(struct task_struct *task)
{
return false;
@@ -81,6 +94,11 @@ static inline bool is_indir_lp_locked(struct task_struct *task)
return false;
}
+static inline unsigned long get_active_shstk(struct task_struct *task)
+{
+ return 0;
+}
+
#endif /* CONFIG_RISCV_USER_CFI */
#endif /* __ASSEMBLY__ */
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 88b6220b2608..d1092f0a6363 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -22,6 +22,7 @@
#include <asm/vector.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
+#include <asm/usercfi.h>
unsigned long signal_minsigstksz __ro_after_init;
@@ -229,6 +230,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
struct task_struct *task;
+ unsigned long ss_ptr = 0;
sigset_t set;
size_t frame_size = get_rt_frame_size(false);
@@ -251,6 +253,26 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
+ /*
+ * Restore the shadow stack pointer via a token stored on the shadow stack itself;
+ * this is the safe way to restore it.
+ * A token on the shadow stack gives the following properties:
+ * - Safe save and restore for shadow stack switching. Any save of the shadow stack
+ * must have pushed a token onto the shadow stack, and any restore must validate
+ * the token before restoring. Since writing the address of the shadow stack onto
+ * the shadow stack itself is not easily possible, a restore without a prior save
+ * is quite difficult for an attacker to perform.
+ * - A natural break. A token provides a natural break in the shadow stack, so a
+ * single linear range can be bucketed into different shadow stack segments.
+ * sspopchk will detect the condition and fault to the kernel as a sw check exception.
+ */
+ if (__copy_from_user(&ss_ptr, &frame->uc.uc_mcontext.sc_cfi_state.ss_ptr,
+ sizeof(unsigned long)))
+ goto badframe;
+
+ if (is_shstk_enabled(current) && restore_user_shstk(current, ss_ptr))
+ goto badframe;
+
regs->cause = -1UL;
return regs->a0;
@@ -320,6 +342,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct rt_sigframe __user *frame;
long err = 0;
unsigned long __maybe_unused addr;
+ unsigned long ss_ptr = 0;
size_t frame_size = get_rt_frame_size(false);
frame = get_sigframe(ksig, regs, frame_size);
@@ -331,6 +354,23 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(NULL, &frame->uc.uc_link);
+ /*
+ * Save a pointer to the shadow stack itself on the shadow stack, as a form of token.
+ * A token on the shadow stack gives the following properties:
+ * - Safe save and restore for shadow stack switching. Any save of the shadow stack
+ * must have pushed a token onto the shadow stack, and any restore must validate
+ * the token before restoring. Since writing the address of the shadow stack onto
+ * the shadow stack itself is not easily possible, a restore without a prior save
+ * is quite difficult for an attacker to perform.
+ * - A natural break. A token provides a natural break in the shadow stack, so a
+ * single linear range can be bucketed into different shadow stack segments.
+ * sspopchk will detect the condition and fault to the kernel as a sw check exception.
+ */
+ if (is_shstk_enabled(current)) {
+ err |= save_user_shstk(current, &ss_ptr);
+ err |= __put_user(ss_ptr, &frame->uc.uc_mcontext.sc_cfi_state.ss_ptr);
+ }
+
err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
err |= setup_sigcontext(frame, regs);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
@@ -341,6 +381,11 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
#ifdef CONFIG_MMU
regs->ra = (unsigned long)VDSO_SYMBOL(
current->mm->context.vdso, rt_sigreturn);
+
+ /* If bcfi is enabled, x1 (ra) and x5 (t0) must match. Not sure if we need this? */
+ if (is_shstk_enabled(current))
+ regs->t0 = regs->ra;
+
#else
/*
* For the nommu case we don't have a VDSO. Instead we push two
diff --git a/arch/riscv/kernel/usercfi.c b/arch/riscv/kernel/usercfi.c
index af8cc8f4616c..f5eb0124571b 100644
--- a/arch/riscv/kernel/usercfi.c
+++ b/arch/riscv/kernel/usercfi.c
@@ -52,6 +52,11 @@ void set_active_shstk(struct task_struct *task, unsigned long shstk_addr)
task->thread_info.user_cfi_state.user_shdw_stk = shstk_addr;
}
+unsigned long get_active_shstk(struct task_struct *task)
+{
+ return task->thread_info.user_cfi_state.user_shdw_stk;
+}
+
void set_shstk_status(struct task_struct *task, bool enable)
{
task->thread_info.user_cfi_state.ubcfi_en = enable ? 1 : 0;
@@ -165,6 +170,48 @@ static int create_rstor_token(unsigned long ssp, unsigned long *token_addr)
return 0;
}
+/*
+ * Save the user shadow stack pointer on the shadow stack itself (as a restore token)
+ * and return a pointer to the saved location. Returns -EFAULT if the token could not be written.
+ */
+int save_user_shstk(struct task_struct *tsk, unsigned long *saved_shstk_ptr)
+{
+ unsigned long ss_ptr = 0;
+ unsigned long token_loc = 0;
+ int ret = 0;
+
+ if (saved_shstk_ptr == NULL)
+ return -EINVAL;
+
+ ss_ptr = get_active_shstk(tsk);
+ ret = create_rstor_token(ss_ptr, &token_loc);
+
+ *saved_shstk_ptr = token_loc;
+ return ret;
+}
+
+/*
+ * Restore the user shadow stack pointer from the token on the shadow stack for task `tsk`.
+ * Returns -EFAULT if the token could not be read and -EINVAL if the token is invalid.
+ */
+int restore_user_shstk(struct task_struct *tsk, unsigned long shstk_ptr)
+{
+ unsigned long token = 0;
+
+ token = amo_user_shstk((unsigned long __user *)shstk_ptr, 0);
+
+ if (token == -1)
+ return -EFAULT;
+
+ /* invalid token, return EINVAL */
+ if ((token - shstk_ptr) != SHSTK_ENTRY_SIZE)
+ return -EINVAL;
+
+ /* all checks passed, set active shstk and return success */
+ set_active_shstk(tsk, token);
+ return 0;
+}
+
static unsigned long allocate_shadow_stack(unsigned long addr, unsigned long size,
unsigned long token_offset,
bool set_tok)
--
2.43.0