[patch V3 01/32] mm/slab: Fix broken stack trace storage

From: Thomas Gleixner
Date: Sun Apr 14 2019 - 12:02:18 EST


kstack_end() is broken on interrupt stacks as they are not guaranteed to be
sized THREAD_SIZE and THREAD_SIZE aligned.

Use the stack tracer instead. Remove the pointless pointer increment at the
end of the function while at it.

Fixes: 98eb235b7feb ("[PATCH] page unmapping debug") - History tree
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: linux-mm@xxxxxxxxx
---
mm/slab.c | 28 ++++++++++++----------------
1 file changed, 12 insertions(+), 16 deletions(-)

--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1470,33 +1470,29 @@ static bool is_debug_pagealloc_cache(str
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
unsigned long caller)
{
- int size = cachep->object_size;
+ int size = cachep->object_size / sizeof(unsigned long);

addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];

- if (size < 5 * sizeof(unsigned long))
+ if (size < 5)
return;

*addr++ = 0x12345678;
*addr++ = caller;
*addr++ = smp_processor_id();
- size -= 3 * sizeof(unsigned long);
+#ifdef CONFIG_STACKTRACE
{
- unsigned long *sptr = &caller;
- unsigned long svalue;
-
- while (!kstack_end(sptr)) {
- svalue = *sptr++;
- if (kernel_text_address(svalue)) {
- *addr++ = svalue;
- size -= sizeof(unsigned long);
- if (size <= sizeof(unsigned long))
- break;
- }
- }
+ struct stack_trace trace = {
+ .max_entries = size - 4,
+ .entries = addr,
+ .skip = 3,
+ };

+ save_stack_trace(&trace);
+ addr += trace.nr_entries;
}
- *addr++ = 0x87654321;
+#endif
+ *addr = 0x87654321;
}

static void slab_kernel_map(struct kmem_cache *cachep, void *objp,