[PATCH v2 05/18] lib/stackdepot: annotate init and early init functions
From: andrey.konovalov
Date: Fri Feb 10 2023 - 16:16:30 EST
From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Add comments to stack_depot_early_init and stack_depot_init to explain
certain parts of their implementation.

Also add a pr_info message to stack_depot_early_init similar to the one
in stack_depot_init.

Also move the scale variable in stack_depot_init to the scope where it
is used.
Reviewed-by: Alexander Potapenko <glider@xxxxxxxxxx>
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
---
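
For reviewers, a quick illustration of the "automatic scaling" that the
new comment in stack_depot_init refers to: when stack_hash_order is not
set, the number of hash table entries is derived from the free page
count and clamped to the supported order range. The userspace sketch
below models that calculation; PAGE_SHIFT and the STACK_HASH_* values
are illustrative stand-ins rather than the exact kernel configuration,
and roundup_pow_of_two is a minimal reimplementation of the kernel
helper.

#include <stdio.h>

/* Illustrative stand-ins for the kernel constants. */
#define PAGE_SHIFT 12
#define STACK_HASH_SCALE 14
#define STACK_HASH_ORDER_MIN 12
#define STACK_HASH_ORDER_MAX 20

/* Minimal model of the kernel's roundup_pow_of_two(). */
static unsigned long roundup_pow_of_two(unsigned long x)
{
	unsigned long r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	/* Pretend nr_free_buffer_pages() returned 3 million pages. */
	unsigned long entries = 3000000;
	int scale = STACK_HASH_SCALE;

	entries = roundup_pow_of_two(entries);

	/* Scale the entry count relative to the page size. */
	if (scale > PAGE_SHIFT)
		entries >>= (scale - PAGE_SHIFT);
	else
		entries <<= (PAGE_SHIFT - scale);

	/* Clamp to the supported hash table order range. */
	if (entries < 1UL << STACK_HASH_ORDER_MIN)
		entries = 1UL << STACK_HASH_ORDER_MIN;
	if (entries > 1UL << STACK_HASH_ORDER_MAX)
		entries = 1UL << STACK_HASH_ORDER_MAX;

	/* Prints 1048576, i.e. order 20, for these inputs. */
	printf("hash table entries: %lu\n", entries);
	return 0;
}

With these stand-in values, ~3M free pages round up to 2^22, the scale
shift brings that down to 2^20, and the clamp leaves it at the maximum
order, the same ceiling the STACK_HASH_ORDER_MAX limit enforces.
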
lib/stackdepot.c | 27 +++++++++++++++++++++------
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 202e07c4f02d..9fab711e4826 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -115,24 +115,34 @@ void __init stack_depot_request_early_init(void)
 	__stack_depot_early_init_requested = true;
 }
 
+/* Allocates a hash table via memblock. Can only be used during early boot. */
 int __init stack_depot_early_init(void)
 {
 	unsigned long entries = 0;
 
-	/* This is supposed to be called only once, from mm_init() */
+	/* This function must be called only once, from mm_init(). */
 	if (WARN_ON(__stack_depot_early_init_passed))
 		return 0;
-
 	__stack_depot_early_init_passed = true;
 
+	/*
+	 * If KASAN is enabled, use the maximum order: KASAN is frequently used
+	 * in fuzzing scenarios, which leads to a large number of different
+	 * stack traces being stored in stack depot.
+	 */
 	if (kasan_enabled() && !stack_hash_order)
 		stack_hash_order = STACK_HASH_ORDER_MAX;
 
 	if (!__stack_depot_early_init_requested || stack_depot_disabled)
 		return 0;
 
+	/*
+	 * If stack_hash_order is not set, leave entries as 0 to rely on the
+	 * automatic calculations performed by alloc_large_system_hash.
+	 */
 	if (stack_hash_order)
-		entries = 1UL << stack_hash_order;
+		entries = 1UL << stack_hash_order;
+	pr_info("allocating hash table via alloc_large_system_hash\n");
 	stack_table = alloc_large_system_hash("stackdepot",
 						sizeof(struct stack_record *),
 						entries,
@@ -142,7 +152,6 @@ int __init stack_depot_early_init(void)
 						&stack_hash_mask,
 						1UL << STACK_HASH_ORDER_MIN,
 						1UL << STACK_HASH_ORDER_MAX);
-
 	if (!stack_table) {
 		pr_err("hash table allocation failed, disabling\n");
 		stack_depot_disabled = true;
@@ -152,6 +161,7 @@ int __init stack_depot_early_init(void)
 	return 0;
 }
 
+/* Allocates a hash table via kvcalloc. Can be used after boot. */
 int stack_depot_init(void)
 {
 	static DEFINE_MUTEX(stack_depot_init_mutex);
@@ -160,11 +170,16 @@ int stack_depot_init(void)
 	mutex_lock(&stack_depot_init_mutex);
 	if (!stack_depot_disabled && !stack_table) {
 		unsigned long entries;
-		int scale = STACK_HASH_SCALE;
 
+		/*
+		 * Similarly to stack_depot_early_init, use stack_hash_order
+		 * if assigned, and rely on automatic scaling otherwise.
+		 */
 		if (stack_hash_order) {
 			entries = 1UL << stack_hash_order;
 		} else {
+			int scale = STACK_HASH_SCALE;
+
 			entries = nr_free_buffer_pages();
 			entries = roundup_pow_of_two(entries);
 
@@ -179,7 +194,7 @@ int stack_depot_init(void)
 		if (entries > 1UL << STACK_HASH_ORDER_MAX)
 			entries = 1UL << STACK_HASH_ORDER_MAX;
 
-		pr_info("allocating hash table of %lu entries with kvcalloc\n",
+		pr_info("allocating hash table of %lu entries via kvcalloc\n",
 			entries);
 		stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
 		if (!stack_table) {
--
2.25.1