// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since
 * alloc and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot; they are stored one after another
 * within contiguous memory allocations.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */
#define pr_fmt(fmt) "stackdepot: " fmt

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>
#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)

#define DEPOT_VALID_BITS 1
#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
#define DEPOT_STACK_ALIGN 4
#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_VALID_BITS - \
                               DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
#define DEPOT_POOLS_CAP 8192
#define DEPOT_MAX_POOLS \
        (((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
         (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
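
/*
 * Worked example of the handle layout (a sketch; assumes 4 KB pages, i.e.
 * PAGE_SHIFT == 12, a 32-bit depot_stack_handle_t, and the default
 * STACK_DEPOT_EXTRA_BITS of 5):
 *
 *	DEPOT_POOL_SIZE       = 1 << (12 + 2)   = 16 KB per pool
 *	DEPOT_OFFSET_BITS     = 2 + 12 - 4      = 10 (16-byte-aligned offsets)
 *	DEPOT_POOL_INDEX_BITS = 32 - 1 - 10 - 5 = 16
 *
 * 2^16 pool indices exceed DEPOT_POOLS_CAP, so DEPOT_MAX_POOLS is capped at
 * 8192 pools, i.e. at most 8192 * 16 KB = 128 MB of stored stack traces.
 */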
/* The compact structure to store the reference to stacks. */
union handle_parts {
        depot_stack_handle_t handle;
        struct {
                u32 pool_index : DEPOT_POOL_INDEX_BITS;
                u32 offset     : DEPOT_OFFSET_BITS;
                u32 valid      : DEPOT_VALID_BITS;
                u32 extra      : STACK_DEPOT_EXTRA_BITS;
        };
};

struct stack_record {
        struct stack_record *next;      /* Link in the hashtable */
        u32 hash;                       /* Hash in the hashtable */
        u32 size;                       /* Number of frames in the stack */
        union handle_parts handle;
        unsigned long entries[];        /* Variable-sized array of entries */
};
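
/*
 * Illustration of how a handle resolves to a record (a sketch of the
 * invariant maintained by depot_alloc_stack() and relied upon by
 * stack_depot_fetch()):
 *
 *	record = stack_pools[handle.pool_index] +
 *		 (handle.offset << DEPOT_STACK_ALIGN);
 */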
static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;
/* Use one hash table bucket per 16 KB of memory. */
#define STACK_HASH_TABLE_SCALE 14
/* Limit the number of buckets between 4K and 1M. */
#define STACK_BUCKET_NUMBER_ORDER_MIN 12
#define STACK_BUCKET_NUMBER_ORDER_MAX 20
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c
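
/*
 * Sizing sketch for STACK_HASH_TABLE_SCALE (an illustration, assuming 4 KB
 * pages): one bucket per 2^14 bytes means a machine with 4 GB of buffer
 * pages gets 2^32 / 2^14 = 2^18 = 256K buckets, which falls within the
 * 4K-1M bucket limits above.
 */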
/* Hash table of pointers to stored stack traces. */
static struct stack_record **stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;
/* Array of memory regions that store stack traces. */
static void *stack_pools[DEPOT_MAX_POOLS];
/* Currently used pool in stack_pools. */
static int pool_index;
/* Offset to the unused space in the currently used pool. */
static size_t pool_offset;
/* Lock that protects the variables above. */
static DEFINE_RAW_SPINLOCK(pool_lock);
/*
 * Stack depot tries to keep an extra pool allocated even before it runs out
 * of space in the currently used pool.
 * This flag marks whether this extra pool needs to be allocated and
 * initialized. It has the value 0 when either the next pool is already
 * initialized or the limit on the number of pools is reached.
 */
static int next_pool_required = 1;
static int __init disable_stack_depot(char *str)
{
        int ret;

        ret = kstrtobool(str, &stack_depot_disabled);
        if (!ret && stack_depot_disabled) {
                pr_info("disabled\n");
                stack_table = NULL;
        }
        return 0;
}
early_param("stack_depot_disable", disable_stack_depot);
void __init stack_depot_request_early_init(void)
{
        /* Too late to request early init now. */
        WARN_ON(__stack_depot_early_init_passed);

        __stack_depot_early_init_requested = true;
}
/* Allocates a hash table via memblock. Can only be used during early boot. */
int __init stack_depot_early_init(void)
{
        unsigned long entries = 0;

        /* This function must be called only once, from mm_init(). */
        if (WARN_ON(__stack_depot_early_init_passed))
                return 0;
        __stack_depot_early_init_passed = true;

        /*
         * If KASAN is enabled, use the maximum order: KASAN is frequently used
         * in fuzzing scenarios, which leads to a large number of different
         * stack traces being stored in stack depot.
         */
        if (kasan_enabled() && !stack_bucket_number_order)
                stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;

        if (!__stack_depot_early_init_requested || stack_depot_disabled)
                return 0;

        /*
         * If stack_bucket_number_order is not set, leave entries as 0 to rely
         * on the automatic calculations performed by alloc_large_system_hash.
         */
        if (stack_bucket_number_order)
                entries = 1UL << stack_bucket_number_order;
        pr_info("allocating hash table via alloc_large_system_hash\n");
        stack_table = alloc_large_system_hash("stackdepot",
                                                sizeof(struct stack_record *),
                                                entries,
                                                STACK_HASH_TABLE_SCALE,
                                                HASH_EARLY | HASH_ZERO,
                                                NULL,
                                                &stack_hash_mask,
                                                1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
                                                1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
        if (!stack_table) {
                pr_err("hash table allocation failed, disabling\n");
                stack_depot_disabled = true;
                return -ENOMEM;
        }

        return 0;
}
/* Allocates a hash table via kvcalloc. Can be used after boot. */
int stack_depot_init(void)
{
        static DEFINE_MUTEX(stack_depot_init_mutex);
        unsigned long entries;
        int ret = 0;

        mutex_lock(&stack_depot_init_mutex);

        if (stack_depot_disabled || stack_table)
                goto out_unlock;

        /*
         * Similarly to stack_depot_early_init, use stack_bucket_number_order
         * if assigned, and rely on automatic scaling otherwise.
         */
        if (stack_bucket_number_order) {
                entries = 1UL << stack_bucket_number_order;
        } else {
                int scale = STACK_HASH_TABLE_SCALE;

                entries = nr_free_buffer_pages();
                entries = roundup_pow_of_two(entries);

                if (scale > PAGE_SHIFT)
                        entries >>= (scale - PAGE_SHIFT);
                else
                        entries <<= (PAGE_SHIFT - scale);
        }

        if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
                entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
        if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
                entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;

        pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
        stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
        if (!stack_table) {
                pr_err("hash table allocation failed, disabling\n");
                stack_depot_disabled = true;
                ret = -ENOMEM;
                goto out_unlock;
        }
        stack_hash_mask = entries - 1;

out_unlock:
        mutex_unlock(&stack_depot_init_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
/* Uses preallocated memory to initialize a new stack depot pool. */
static void depot_init_pool(void **prealloc)
{
        /*
         * If the next pool is already initialized or the maximum number of
         * pools is reached, do not use the preallocated memory.
         * smp_load_acquire() here pairs with smp_store_release() below and
         * in depot_alloc_stack().
         */
        if (!smp_load_acquire(&next_pool_required))
                return;

        /* Check if the current pool is not yet allocated. */
        if (stack_pools[pool_index] == NULL) {
                /* Use the preallocated memory for the current pool. */
                stack_pools[pool_index] = *prealloc;
                *prealloc = NULL;
        } else {
                /*
                 * Otherwise, use the preallocated memory for the next pool
                 * as long as we do not exceed the maximum number of pools.
                 */
                if (pool_index + 1 < DEPOT_MAX_POOLS) {
                        stack_pools[pool_index + 1] = *prealloc;
                        *prealloc = NULL;
                }
        }

        /*
         * At this point, either the next pool is initialized or the
         * maximum number of pools is reached. In either case, take
         * note that initializing another pool is not required.
         * This smp_store_release() pairs with smp_load_acquire() above
         * and in stack_depot_save().
         */
        smp_store_release(&next_pool_required, 0);
}
/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
        struct stack_record *stack;
        size_t required_size = struct_size(stack, entries, size);

        required_size = ALIGN(required_size, 1 << DEPOT_STACK_ALIGN);

        /* Check if there is not enough space in the current pool. */
        if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
                /* Bail out if we reached the pool limit. */
                if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
                        WARN_ONCE(1, "Stack depot reached limit capacity");
                        return NULL;
                }

                /* Move on to the next pool. */
                pool_index++;
                pool_offset = 0;

                /*
                 * If the maximum number of pools is not reached, take note
                 * that the next pool needs to be initialized.
                 * smp_store_release() here pairs with smp_load_acquire() in
                 * stack_depot_save() and depot_init_pool().
                 */
                if (pool_index + 1 < DEPOT_MAX_POOLS)
                        smp_store_release(&next_pool_required, 1);
        }

        /* Assign the preallocated memory to a pool if required. */
        if (*prealloc)
                depot_init_pool(prealloc);

        /* Check if we have a pool to save the stack trace. */
        if (stack_pools[pool_index] == NULL)
                return NULL;

        /* Save the stack trace. */
        stack = stack_pools[pool_index] + pool_offset;
        stack->hash = hash;
        stack->size = size;
        stack->handle.pool_index = pool_index;
        stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
        stack->handle.valid = 1;
        stack->handle.extra = 0;
        memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
        pool_offset += required_size;

        return stack;
}
/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
        return jhash2((u32 *)entries,
                      array_size(size, sizeof(*entries)) / sizeof(u32),
                      STACK_HASH_SEED);
}
/*
 * Use our own, non-instrumented version of memcmp().
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
                        unsigned int n)
{
        for ( ; n-- ; u1++, u2++) {
                if (*u1 != *u2)
                        return 1;
        }
        return 0;
}
/* Finds the stack in the bucket that is equal to the one stored in entries. */
static inline struct stack_record *find_stack(struct stack_record *bucket,
                                              unsigned long *entries, int size,
                                              u32 hash)
{
        struct stack_record *found;

        for (found = bucket; found; found = found->next) {
                if (found->hash == hash &&
                    found->size == size &&
                    !stackdepot_memcmp(entries, found->entries, size))
                        return found;
        }
        return NULL;
}
/**
 * __stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @extra_bits:		Flags to store in unused bits of depot_stack_handle_t
 * @alloc_flags:	Allocation gfp flags
 * @can_alloc:		Allocate stack pools (increased chance of failure if false)
 *
 * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
 * %true, stack depot is allowed to replenish its pools in case no space is left
 * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
 * any allocations and will fail if no space is left to store the stack trace.
 *
 * If the stack trace in @entries is from an interrupt, only the portion up to
 * interrupt entry is saved.
 *
 * Additional opaque flags can be passed via @extra_bits, stored in the unused
 * bits of the stack handle, and retrieved using stack_depot_get_extra_bits()
 * without calling stack_depot_fetch().
 *
 * Context: Any context, but setting @can_alloc to %false is required if
 *          alloc_pages() cannot be used from the current context. Currently
 *          this is the case in contexts where neither %GFP_ATOMIC nor
 *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
                                        unsigned int nr_entries,
                                        unsigned int extra_bits,
                                        gfp_t alloc_flags, bool can_alloc)
{
        struct stack_record *found = NULL, **bucket;
        union handle_parts retval = { .handle = 0 };
        struct page *page = NULL;
        void *prealloc = NULL;
        unsigned long flags;
        u32 hash;

        /*
         * If this stack trace is from an interrupt, including anything before
         * interrupt entry usually leads to unbounded stack depot growth.
         *
         * Because use of filter_irq_stacks() is a requirement to ensure stack
         * depot can efficiently deduplicate interrupt stacks, always call
         * filter_irq_stacks() here to simplify all callers' use of stack depot.
         */
        nr_entries = filter_irq_stacks(entries, nr_entries);

        if (unlikely(nr_entries == 0) || stack_depot_disabled)
                goto fast_exit;

        hash = hash_stack(entries, nr_entries);
        bucket = &stack_table[hash & stack_hash_mask];

        /*
         * Fast path: look the stack trace up without locking.
         * The smp_load_acquire() here pairs with smp_store_release() to
         * |bucket| below.
         */
        found = find_stack(smp_load_acquire(bucket), entries,
                           nr_entries, hash);
        if (found)
                goto exit;

        /*
         * Check if another stack pool needs to be initialized. If so, allocate
         * the memory now - we won't be able to do that under the lock.
         *
         * The smp_load_acquire() here pairs with smp_store_release() to
         * |next_pool_required| in depot_alloc_stack() and depot_init_pool().
         */
        if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
                /*
                 * Zero out zone modifiers, as we don't have specific zone
                 * requirements. Keep the flags related to allocation in atomic
                 * contexts and I/O.
                 */
                alloc_flags &= ~GFP_ZONEMASK;
                alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
                alloc_flags |= __GFP_NOWARN;
                page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
                if (page)
                        prealloc = page_address(page);
        }

        raw_spin_lock_irqsave(&pool_lock, flags);

        found = find_stack(*bucket, entries, nr_entries, hash);
        if (!found) {
                struct stack_record *new = depot_alloc_stack(entries, nr_entries, hash, &prealloc);

                if (new) {
                        new->next = *bucket;
                        /*
                         * This smp_store_release() pairs with
                         * smp_load_acquire() from |bucket| above.
                         */
                        smp_store_release(bucket, new);
                        found = new;
                }
        } else if (prealloc) {
                /*
                 * We didn't need to store this stack trace, but let's keep
                 * the preallocated memory for the future.
                 */
                depot_init_pool(&prealloc);
        }

        raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
        if (prealloc) {
                /* Nobody used this memory, ok to free it. */
                free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
        }
        if (found)
                retval.handle = found->handle.handle;
fast_exit:
        retval.extra = extra_bits;

        return retval.handle;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);
/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Context: Contexts where allocations via alloc_pages() are allowed.
 *          See __stack_depot_save() for more details.
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
                                      unsigned int nr_entries,
                                      gfp_t alloc_flags)
{
        return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
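
/*
 * Example usage (a sketch; a typical caller captures the trace with
 * stack_trace_save() from <linux/stacktrace.h> and keeps only the 32-bit
 * handle):
 *
 *	unsigned long entries[64];
 *	unsigned int nr_entries;
 *	depot_stack_handle_t handle;
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr_entries, GFP_KERNEL);
 *	if (!handle)
 *		... saving failed: depot disabled or no space left ...
 */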
/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:		Stack depot handle which was returned from
 *			stack_depot_save().
 * @entries:		Pointer to store the entries address
 *
 * Return: The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
                               unsigned long **entries)
{
        union handle_parts parts = { .handle = handle };
        void *pool;
        size_t offset = parts.offset << DEPOT_STACK_ALIGN;
        struct stack_record *stack;

        *entries = NULL;
        if (!handle)
                return 0;

        if (parts.pool_index > pool_index) {
                WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
                        parts.pool_index, pool_index, handle);
                return 0;
        }
        pool = stack_pools[parts.pool_index];
        if (!pool)
                return 0;
        stack = pool + offset;

        *entries = stack->entries;
        return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
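
/*
 * Example consumer (a sketch): resolve a previously stored handle back into
 * frames. This is equivalent to what stack_depot_print() below does.
 *
 *	unsigned long *entries;
 *	unsigned int nr_entries;
 *
 *	nr_entries = stack_depot_fetch(handle, &entries);
 *	if (nr_entries > 0)
 *		stack_trace_print(entries, nr_entries, 0);
 */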
/**
 * stack_depot_print - print stack entries from a depot
 *
 * @stack:		Stack depot handle which was returned from
 *			stack_depot_save().
 */
void stack_depot_print(depot_stack_handle_t stack)
{
        unsigned long *entries;
        unsigned int nr_entries;

        nr_entries = stack_depot_fetch(stack, &entries);
        if (nr_entries > 0)
                stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);
/**
 * stack_depot_snprint - print stack entries from a depot into a buffer
 *
 * @handle:	Stack depot handle which was returned from
 *		stack_depot_save().
 * @buf:	Pointer to the print buffer
 * @size:	Size of the print buffer
 * @spaces:	Number of leading spaces to print
 *
 * Return:	Number of bytes printed.
 */
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
                        int spaces)
{
        unsigned long *entries;
        unsigned int nr_entries;

        nr_entries = stack_depot_fetch(handle, &entries);
        return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
                                                spaces) : 0;
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);
unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
{
        union handle_parts parts = { .handle = handle };

        return parts.extra;
}
EXPORT_SYMBOL(stack_depot_get_extra_bits);
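
/*
 * Example round trip for the extra bits (a sketch; the flag value 0x3 is
 * hypothetical and must fit into STACK_DEPOT_EXTRA_BITS):
 *
 *	handle = __stack_depot_save(entries, nr_entries, 0x3,
 *				    GFP_KERNEL, true);
 *	...
 *	extra = stack_depot_get_extra_bits(handle);	(yields 0x3 again)
 */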