[PATCH 5.15 009/277] kfence: move saving stack trace of allocations into __kfence_alloc()

From: Greg Kroah-Hartman
Date: Tue Apr 12 2022 - 03:06:00 EST

From: Marco Elver <elver@xxxxxxxxxx>

[ Upstream commit a9ab52bbcb52df49ec4b30e6741e120588989455 ]
Move the saving of the stack trace of allocations into __kfence_alloc(),
so that the stack entries array can be used outside of
kfence_guarded_alloc() and we avoid potentially unwinding the stack
multiple times.
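
In other words, the stack is now unwound once in __kfence_alloc() and the
entries are handed down to the guarded-allocation path. A condensed sketch
of the resulting call flow (paraphrasing the hunks below, not the literal
kernel code):

	void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
	{
		unsigned long stack_entries[KFENCE_STACK_DEPTH];
		size_t num_stack_entries;

		/* ... size check, allocation gate, policy checks ... */

		/* Unwind the stack exactly once, here. */
		num_stack_entries = stack_trace_save(stack_entries,
						     KFENCE_STACK_DEPTH, 0);

		/* The saved entries are copied into the object's metadata. */
		return kfence_guarded_alloc(s, size, flags, stack_entries,
					    num_stack_entries);
	}

metadata_update_state() falls back to calling stack_trace_save() itself only
when stack_entries is NULL (as on the free path), so the allocation path no
longer unwinds the stack more than once.
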
Link: https://lkml.kernel.org/r/20210923104803.2620285-3-elver@xxxxxxxxxx
Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
Reviewed-by: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Acked-by: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Aleksandr Nogikh <nogikh@xxxxxxxxxx>
Cc: Jann Horn <jannh@xxxxxxxxxx>
Cc: Taras Madan <tarasmadan@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
mm/kfence/core.c | 35 ++++++++++++++++++++++++-----------
1 file changed, 24 insertions(+), 11 deletions(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index f26f55850ad7..4eec0c5d32b5 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -188,19 +188,26 @@ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *m
* Update the object's metadata state, including updating the alloc/free stacks
* depending on the state transition.
*/
-static noinline void metadata_update_state(struct kfence_metadata *meta,
- enum kfence_object_state next)
+static noinline void
+metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
+ unsigned long *stack_entries, size_t num_stack_entries)
{
struct kfence_track *track =
next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
lockdep_assert_held(&meta->lock);
- /*
- * Skip over 1 (this) functions; noinline ensures we do not accidentally
- * skip over the caller by never inlining.
- */
- track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
+ if (stack_entries) {
+ memcpy(track->stack_entries, stack_entries,
+ num_stack_entries * sizeof(stack_entries[0]));
+ } else {
+ /*
+ * Skip over 1 (this) functions; noinline ensures we do not
+ * accidentally skip over the caller by never inlining.
+ */
+ num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
+ }
+ track->num_stack_entries = num_stack_entries;
track->pid = task_pid_nr(current);
track->cpu = raw_smp_processor_id();
track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
@@ -262,7 +269,8 @@ static __always_inline void for_each_canary(const struct kfence_metadata *meta,
}
}
-static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)
+static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
+ unsigned long *stack_entries, size_t num_stack_entries)
{
struct kfence_metadata *meta = NULL;
unsigned long flags;
@@ -321,7 +329,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
addr = (void *)meta->addr;
/* Update remaining metadata. */
- metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED);
+ metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
WRITE_ONCE(meta->cache, cache);
meta->size = size;
@@ -401,7 +409,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
memzero_explicit(addr, meta->size);
/* Mark the object as freed. */
- metadata_update_state(meta, KFENCE_OBJECT_FREED);
+ metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
raw_spin_unlock_irqrestore(&meta->lock, flags);
@@ -746,6 +754,9 @@ void kfence_shutdown_cache(struct kmem_cache *s)
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
+ unsigned long stack_entries[KFENCE_STACK_DEPTH];
+ size_t num_stack_entries;
+
/*
* Perform size check before switching kfence_allocation_gate, so that
* we don't disable KFENCE without making an allocation.
@@ -785,7 +796,9 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
if (!READ_ONCE(kfence_enabled))
return NULL;
- return kfence_guarded_alloc(s, size, flags);
+ num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);
+
+ return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries);
}
size_t kfence_ksize(const void *addr)
--
2.35.1