// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since
 * alloc and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot; they are stored contiguously one
 * after another in a set of fixed-size memory pools.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */
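/*
 * Typical usage, as an illustrative sketch (capturing the trace itself is
 * the caller's job; the depot only deduplicates and stores the entries):
 *
 *	unsigned long entries[64], *entries_ptr;
 *	unsigned int nr_entries;
 *	depot_stack_handle_t handle;
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr_entries, GFP_KERNEL);
 *	...
 *	nr_entries = stack_depot_fetch(handle, &entries_ptr);
 *	stack_trace_print(entries_ptr, nr_entries, 0);
 */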
#define pr_fmt(fmt) "stackdepot: " fmt

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>
#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)

#define DEPOT_VALID_BITS 1
#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
#define DEPOT_STACK_ALIGN 4
#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_VALID_BITS - \
			       DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
#define DEPOT_POOLS_CAP 8192
#define DEPOT_MAX_POOLS \
	(((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
	 (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
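/*
 * Worked example (assuming 4 KB pages, i.e. PAGE_SHIFT == 12, and the
 * default STACK_DEPOT_EXTRA_BITS of 5): a pool is 4 pages == 16 KB, and
 * offsets are stored in 16-byte units, so DEPOT_OFFSET_BITS == 2 + 12 - 4
 * == 10. That leaves 32 - 1 - 10 - 5 == 16 bits for the pool index, which
 * DEPOT_POOLS_CAP then caps at 8192 pools, i.e. at most 128 MB of stack
 * storage.
 */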
/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 pool_index	: DEPOT_POOL_INDEX_BITS;
		u32 offset	: DEPOT_OFFSET_BITS;
		u32 valid	: DEPOT_VALID_BITS;
		u32 extra	: STACK_DEPOT_EXTRA_BITS;
	};
};

struct stack_record {
	struct stack_record *next;	/* Link in the hash table */
	u32 hash;			/* Hash in the hash table */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[];	/* Variable-sized array of entries */
};
static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;
/* Use one hash table bucket per 16 KB of memory. */
#define STACK_HASH_TABLE_SCALE 14
/* Limit the number of buckets between 4K and 1M. */
#define STACK_BUCKET_NUMBER_ORDER_MIN 12
#define STACK_BUCKET_NUMBER_ORDER_MAX 20
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c
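/*
 * Worked example: on a machine with 8 GB of low memory, one bucket per
 * 16 KB (scale 14) gives 2^33 / 2^14 == 512K buckets, which lies within
 * the [4K, 1M] clamp above.
 */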
/* Hash table of pointers to stored stack traces. */
static struct stack_record **stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;

/* Array of memory regions that store stack traces. */
static void *stack_pools[DEPOT_MAX_POOLS];
/* Currently used pool in stack_pools. */
static int pool_index;
/* Offset to the unused space in the currently used pool. */
static size_t pool_offset;
/* Lock that protects the variables above. */
static DEFINE_RAW_SPINLOCK(pool_lock);
/* Whether the next pool is initialized. */
static int next_pool_inited;
static int __init disable_stack_depot(char *str)
{
	int ret;

	ret = kstrtobool(str, &stack_depot_disabled);
	if (!ret && stack_depot_disabled) {
		pr_info("disabled\n");
		stack_table = NULL;
	}
	return 0;
}
early_param("stack_depot_disable", disable_stack_depot);
void __init stack_depot_request_early_init(void)
{
	/* Too late to request early init now. */
	WARN_ON(__stack_depot_early_init_passed);

	__stack_depot_early_init_requested = true;
}
/* Allocates a hash table via memblock. Can only be used during early boot. */
int __init stack_depot_early_init(void)
{
	unsigned long entries = 0;

	/* This function must be called only once, from mm_init(). */
	if (WARN_ON(__stack_depot_early_init_passed))
		return 0;
	__stack_depot_early_init_passed = true;

	/*
	 * If KASAN is enabled, use the maximum order: KASAN is frequently used
	 * in fuzzing scenarios, which leads to a large number of different
	 * stack traces being stored in stack depot.
	 */
	if (kasan_enabled() && !stack_bucket_number_order)
		stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;

	if (!__stack_depot_early_init_requested || stack_depot_disabled)
		return 0;

	/*
	 * If stack_bucket_number_order is not set, leave entries as 0 to rely
	 * on the automatic calculations performed by alloc_large_system_hash.
	 */
	if (stack_bucket_number_order)
		entries = 1UL << stack_bucket_number_order;
	pr_info("allocating hash table via alloc_large_system_hash\n");
	stack_table = alloc_large_system_hash("stackdepot",
						sizeof(struct stack_record *),
						entries,
						STACK_HASH_TABLE_SCALE,
						HASH_EARLY | HASH_ZERO,
						NULL,
						&stack_hash_mask,
						1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
						1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
	if (!stack_table) {
		pr_err("hash table allocation failed, disabling\n");
		stack_depot_disabled = true;
		return -ENOMEM;
	}

	return 0;
}
/* Allocates a hash table via kvcalloc. Can be used after boot. */
int stack_depot_init(void)
{
	static DEFINE_MUTEX(stack_depot_init_mutex);
	unsigned long entries;
	int ret = 0;

	mutex_lock(&stack_depot_init_mutex);

	if (stack_depot_disabled || stack_table)
		goto out_unlock;

	/*
	 * Similarly to stack_depot_early_init, use stack_bucket_number_order
	 * if assigned, and rely on automatic scaling otherwise.
	 */
	if (stack_bucket_number_order) {
		entries = 1UL << stack_bucket_number_order;
	} else {
		int scale = STACK_HASH_TABLE_SCALE;

		entries = nr_free_buffer_pages();
		entries = roundup_pow_of_two(entries);

		if (scale > PAGE_SHIFT)
			entries >>= (scale - PAGE_SHIFT);
		else
			entries <<= (PAGE_SHIFT - scale);
	}

	if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
	if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;

	pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
	stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
	if (!stack_table) {
		pr_err("hash table allocation failed, disabling\n");
		stack_depot_disabled = true;
		ret = -ENOMEM;
		goto out_unlock;
	}
	stack_hash_mask = entries - 1;

out_unlock:
	mutex_unlock(&stack_depot_init_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
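/*
 * Usage note (illustrative): subsystems that cannot rely on early init are
 * expected to call stack_depot_init() once before their first
 * stack_depot_save() and treat a non-zero return as "stack depot
 * unavailable".
 */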
/* Uses the preallocated memory to initialize the current or the next pool. */
static bool init_stack_pool(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_pool_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_pool_inited))
		return true;
	if (stack_pools[pool_index] == NULL) {
		stack_pools[pool_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot pool, do not touch the next one. */
		if (pool_index + 1 < DEPOT_MAX_POOLS) {
			stack_pools[pool_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release pairs with smp_load_acquire() from
		 * |next_pool_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_pool_inited, 1);
	}
	return true;
}
/* Allocation of a new stack in raw storage */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
	struct stack_record *stack;
	size_t required_size = struct_size(stack, entries, size);

	required_size = ALIGN(required_size, 1 << DEPOT_STACK_ALIGN);

	if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
		/* Bail out if we reached the pool limit. */
		if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		/* Move on to the next pool. */
		pool_index++;
		pool_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_pool_inited| in stack_depot_save() and
		 * init_stack_pool().
		 */
		if (pool_index + 1 < DEPOT_MAX_POOLS)
			smp_store_release(&next_pool_inited, 0);
	}
	init_stack_pool(prealloc);
	if (stack_pools[pool_index] == NULL)
		return NULL;

	stack = stack_pools[pool_index] + pool_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.pool_index = pool_index;
	stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
	stack->handle.valid = 1;
	stack->handle.extra = 0;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	pool_offset += required_size;

	return stack;
}
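/*
 * Sizing note (illustrative, assuming a 64-bit kernel): a 16-frame trace
 * occupies struct_size(stack, entries, 16) == 24 + 16 * 8 == 152 bytes,
 * which depot_alloc_stack() rounds up to 160 bytes to keep records aligned
 * to 1 << DEPOT_STACK_ALIGN == 16 bytes.
 */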
/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	/* jhash2() takes the length in 32-bit words, not in entries. */
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}
/*
 * Use our own, non-instrumented version of memcmp().
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}
/* Find a stack that is equal to the one stored in entries in the hash table. */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					      unsigned long *entries, int size,
					      u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}
/**
 * __stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @extra_bits:		Flags to store in unused bits of depot_stack_handle_t
 * @alloc_flags:	Allocation gfp flags
 * @can_alloc:		Allocate stack pools (increased chance of failure if false)
 *
 * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
 * %true, stack depot is allowed to replenish the stack pools in case no space
 * is left (allocates using GFP flags of @alloc_flags). If @can_alloc is
 * %false, avoids any allocations and fails if no space is left to store the
 * stack trace.
 *
 * If the stack trace in @entries is from an interrupt, only the portion up to
 * interrupt entry is saved.
 *
 * Additional opaque flags can be passed in @extra_bits, stored in the unused
 * bits of the stack handle, and retrieved using stack_depot_get_extra_bits()
 * without calling stack_depot_fetch().
 *
 * Context: Any context, but setting @can_alloc to %false is required if
 *          alloc_pages() cannot be used from the current context. Currently
 *          this is the case for contexts where neither %GFP_ATOMIC nor
 *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
					unsigned int nr_entries,
					unsigned int extra_bits,
					gfp_t alloc_flags, bool can_alloc)
{
	struct stack_record *found = NULL, **bucket;
	union handle_parts retval = { .handle = 0 };
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	/*
	 * If this stack trace is from an interrupt, including anything before
	 * interrupt entry usually leads to unbounded stack depot growth.
	 *
	 * Since use of filter_irq_stacks() is a requirement to ensure stack
	 * depot can efficiently deduplicate interrupt stacks, always call
	 * filter_irq_stacks() here to simplify all callers' use of stack
	 * depot.
	 */
	nr_entries = filter_irq_stacks(entries, nr_entries);

	if (unlikely(nr_entries == 0) || stack_depot_disabled)
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & stack_hash_mask];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack pool needs to be
	 * initialized. If so, allocate the memory now - we won't be able to
	 * do that under the lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_pool_inited| in depot_alloc_stack() and init_stack_pool().
	 */
	if (unlikely(can_alloc && !smp_load_acquire(&next_pool_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new = depot_alloc_stack(entries, nr_entries, hash, &prealloc);

		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_pool(&prealloc));
	}

	raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
	}
	if (found)
		retval.handle = found->handle.handle;
fast_exit:
	retval.extra = extra_bits;

	return retval.handle;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);
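/*
 * Illustrative sketch: from a context where allocation is not possible
 * (e.g. in an NMI handler or under a raw spinlock), a trace can still be
 * saved into already-initialized pool space:
 *
 *	handle = __stack_depot_save(entries, nr_entries, 0, 0, false);
 *
 * With @can_alloc == %false the GFP flags are never used, and the call may
 * return 0 when a fresh pool would be needed but none was preallocated.
 */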
/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Context: Contexts where allocations via alloc_pages() are allowed.
 *          See __stack_depot_save() for more details.
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:		Stack depot handle which was returned from
 *			stack_depot_save().
 * @entries:		Pointer to store the entries address
 *
 * Return: The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *pool;
	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (!handle)
		return 0;

	if (parts.pool_index > pool_index) {
		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
			parts.pool_index, pool_index, handle);
		return 0;
	}
	pool = stack_pools[parts.pool_index];
	if (!pool)
		return 0;
	stack = pool + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
/**
 * stack_depot_print - Print stack entries from a depot
 *
 * @stack:		Stack depot handle which was returned from
 *			stack_depot_save().
 */
void stack_depot_print(depot_stack_handle_t stack)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	if (nr_entries > 0)
		stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);
/**
 * stack_depot_snprint - Print stack entries from a depot into a buffer
 *
 * @handle:	Stack depot handle which was returned from
 *		stack_depot_save().
 * @buf:	Pointer to the print buffer
 * @size:	Size of the print buffer
 * @spaces:	Number of leading spaces to print
 *
 * Return:	Number of bytes printed.
 */
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
			int spaces)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
						spaces) : 0;
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);
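/*
 * Usage sketch for the extra bits (illustrative): a caller can stash a small
 * tag in the otherwise unused bits of a handle and read it back later
 * without fetching the trace:
 *
 *	handle = __stack_depot_save(entries, nr_entries, tag, flags, true);
 *	tag = stack_depot_get_extra_bits(handle);
 *
 * The tag must fit in STACK_DEPOT_EXTRA_BITS bits.
 */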
unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
{
	union handle_parts parts = { .handle = handle };

	return parts.extra;
}
EXPORT_SYMBOL(stack_depot_get_extra_bits);