// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
 * and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so we store them contiguously one
 * after another in a contiguous memory allocation.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */
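/*
 * Usage sketch (illustrative, not part of this file): a typical client
 * captures a trace with stack_trace_save(), stores it once via
 * stack_depot_save(), and keeps only the 32-bit handle. Saving an identical
 * trace again returns the same handle, which is where the deduplication
 * savings come from. record_current_stack() is a hypothetical helper:
 *
 *	static depot_stack_handle_t record_current_stack(gfp_t gfp)
 *	{
 *		unsigned long entries[64];
 *		unsigned int nr_entries;
 *
 *		nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *		return stack_depot_save(entries, nr_entries, gfp);
 *	}
 */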
#define pr_fmt(fmt) "stackdepot: " fmt

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>
#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - \
		STACK_ALLOC_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
		u32 extra : STACK_DEPOT_EXTRA_BITS;
	};
};

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[];	/* Variable-sized array of entries */
};
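/*
 * Worked example of the handle bit budget (illustrative; assumes 4 KB pages
 * and STACK_DEPOT_EXTRA_BITS == 5): STACK_ALLOC_OFFSET_BITS is
 * 2 + 12 - 4 = 10, i.e. offsets are counted in 16-byte units within a
 * 16 KB slab. That leaves 32 - 1 - 10 - 5 = 16 bits for the slab index,
 * which STACK_ALLOC_SLABS_CAP further caps at 8192 slabs, bounding the
 * depot at 8192 * 16 KB = 128 MB of stack storage.
 */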
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_RAW_SPINLOCK(depot_lock);
/* One hash table bucket entry per 16 KB of memory. */
#define STACK_HASH_SCALE	14
/* The number of buckets is clamped to between 4K and 1M. */
#define STACK_HASH_ORDER_MIN	12
#define STACK_HASH_ORDER_MAX	20
#define STACK_HASH_SEED 0x9747b28c
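/*
 * Worked sizing example (illustrative): with STACK_HASH_SCALE == 14, a
 * machine with 8 GB of memory gets roughly 2^33 / 2^14 = 2^19 (512K)
 * buckets, which falls inside the [2^12, 2^20] clamp above.
 */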
static unsigned int stack_hash_order;
static unsigned int stack_hash_mask;

static bool stack_depot_disable;
static struct stack_record **stack_table;
static int __init is_stack_depot_disabled(char *str)
{
	int ret = kstrtobool(str, &stack_depot_disable);

	if (!ret && stack_depot_disable) {
		pr_info("disabled\n");
		stack_table = NULL;
	}
	return 0;
}
early_param("stack_depot_disable", is_stack_depot_disabled);
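/*
 * Example: booting with "stack_depot_disable=on" (kstrtobool() also accepts
 * "1" and "y") disables the depot before any stacks are saved.
 */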
void __init stack_depot_request_early_init(void)
{
	/* Too late to request early init now. */
	WARN_ON(__stack_depot_early_init_passed);

	__stack_depot_early_init_requested = true;
}
int __init stack_depot_early_init(void)
{
	unsigned long entries = 0;

	/* This is supposed to be called only once, from mm_init(). */
	if (WARN_ON(__stack_depot_early_init_passed))
		return 0;
	__stack_depot_early_init_passed = true;

	if (kasan_enabled() && !stack_hash_order)
		stack_hash_order = STACK_HASH_ORDER_MAX;

	if (!__stack_depot_early_init_requested || stack_depot_disable)
		return 0;

	if (stack_hash_order)
		entries = 1UL << stack_hash_order;
	stack_table = alloc_large_system_hash("stackdepot",
						sizeof(struct stack_record *),
						entries,
						STACK_HASH_SCALE,
						HASH_EARLY | HASH_ZERO,
						NULL,
						&stack_hash_mask,
						1UL << STACK_HASH_ORDER_MIN,
						1UL << STACK_HASH_ORDER_MAX);
	if (!stack_table) {
		pr_err("hash table allocation failed, disabling\n");
		stack_depot_disable = true;
		return -ENOMEM;
	}

	return 0;
}
int stack_depot_init(void)
{
	static DEFINE_MUTEX(stack_depot_init_mutex);
	int ret = 0;

	mutex_lock(&stack_depot_init_mutex);
	if (!stack_depot_disable && !stack_table) {
		unsigned long entries;
		int scale = STACK_HASH_SCALE;

		if (stack_hash_order) {
			entries = 1UL << stack_hash_order;
		} else {
			entries = nr_free_buffer_pages();
			entries = roundup_pow_of_two(entries);

			if (scale > PAGE_SHIFT)
				entries >>= (scale - PAGE_SHIFT);
			else
				entries <<= (PAGE_SHIFT - scale);
		}

		if (entries < 1UL << STACK_HASH_ORDER_MIN)
			entries = 1UL << STACK_HASH_ORDER_MIN;
		if (entries > 1UL << STACK_HASH_ORDER_MAX)
			entries = 1UL << STACK_HASH_ORDER_MAX;

		pr_info("allocating hash table of %lu entries with kvcalloc\n",
			entries);
		stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
		if (!stack_table) {
			pr_err("hash table allocation failed, disabling\n");
			stack_depot_disable = true;
			ret = -ENOMEM;
		}
		stack_hash_mask = entries - 1;
	}
	mutex_unlock(&stack_depot_init_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
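/*
 * Usage sketch (hypothetical caller, not part of this file): a debugging
 * feature that may be enabled after boot can initialize the depot lazily
 * instead of requesting early init. stack_depot_init() is idempotent and
 * may block, so call it from process context before the first save:
 *
 *	static int my_debug_feature_enable(void)
 *	{
 *		return stack_depot_init();
 *	}
 */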
static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}
/* Allocation of a new stack in raw storage. */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
	struct stack_record *stack;
	size_t required_size = struct_size(stack, entries, size);

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	stack->handle.extra = 0;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	depot_offset += required_size;

	return stack;
}
/* Calculate the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}

/*
 * Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash table. */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					     unsigned long *entries, int size,
					     u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}
/**
 * __stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @extra_bits:		Flags to store in unused bits of depot_stack_handle_t
 * @alloc_flags:	Allocation gfp flags
 * @can_alloc:		Allocate stack slabs (increased chance of failure if false)
 *
 * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
 * %true, it is allowed to replenish the stack slab pool in case no space is
 * left (allocates using GFP flags of @alloc_flags). If @can_alloc is %false,
 * avoids any allocations and will fail if no space is left to store the stack
 * trace.
 *
 * If the stack trace in @entries is from an interrupt, only the portion up to
 * interrupt entry is saved.
 *
 * Additional opaque flags can be passed in @extra_bits, stored in the unused
 * bits of the stack handle, and retrieved using stack_depot_get_extra_bits()
 * without calling stack_depot_fetch().
 *
 * Context: Any context, but setting @can_alloc to %false is required if
 *          alloc_pages() cannot be used from the current context. Currently
 *          this is the case from contexts where neither %GFP_ATOMIC nor
 *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
					unsigned int nr_entries,
					unsigned int extra_bits,
					gfp_t alloc_flags, bool can_alloc)
{
	struct stack_record *found = NULL, **bucket;
	union handle_parts retval = { .handle = 0 };
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	/*
	 * If this stack trace is from an interrupt, including anything before
	 * interrupt entry usually leads to unbounded stackdepot growth.
	 *
	 * Because use of filter_irq_stacks() is a requirement to ensure
	 * stackdepot can efficiently deduplicate interrupt stacks, always
	 * call filter_irq_stacks() here to simplify all callers' use of
	 * stackdepot.
	 */
	nr_entries = filter_irq_stacks(entries, nr_entries);

	if (unlikely(nr_entries == 0) || stack_depot_disable)
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & stack_hash_mask];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab need to be initialized.
	 * If so, allocate the memory - we won't be able to do that under the
	 * lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(can_alloc && !smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	raw_spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new = depot_alloc_stack(entries, nr_entries, hash, &prealloc);

		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	raw_spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval.handle = found->handle.handle;
fast_exit:
	retval.extra = extra_bits;

	return retval.handle;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);
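/*
 * Usage sketch (hypothetical caller): from a context where alloc_pages()
 * must not be called, e.g. under a raw spinlock, pass @can_alloc == false
 * and tolerate a zero handle when the preallocated pool is exhausted:
 *
 *	unsigned long entries[32];
 *	unsigned int nr_entries;
 *	depot_stack_handle_t handle;
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = __stack_depot_save(entries, nr_entries, 0, 0, false);
 *	if (!handle)
 *		pr_warn_once("could not record stack trace\n");
 */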
/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Context: Contexts where allocations via alloc_pages() are allowed.
 *          See __stack_depot_save() for more details.
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:	Stack depot handle which was returned from
 *		stack_depot_save().
 * @entries:	Pointer to store the entries address
 *
 * Return: The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab;
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (!handle)
		return 0;

	if (parts.slabindex > depot_index) {
		WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
			parts.slabindex, depot_index, handle);
		return 0;
	}
	slab = stack_slabs[parts.slabindex];
	if (!slab)
		return 0;
	stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
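/*
 * Round-trip sketch (illustrative): entries fetched through a handle point
 * directly into depot storage, with no copy; because stacks are never
 * removed from the depot, the returned pointer stays valid indefinitely:
 *
 *	unsigned long *frames;
 *	unsigned int nr_frames;
 *
 *	nr_frames = stack_depot_fetch(handle, &frames);
 *	stack_trace_print(frames, nr_frames, 0);
 */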
/**
 * stack_depot_print - print stack entries from a depot
 *
 * @stack:	Stack depot handle which was returned from
 *		stack_depot_save().
 */
void stack_depot_print(depot_stack_handle_t stack)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	if (nr_entries > 0)
		stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);
/**
 * stack_depot_snprint - print stack entries from a depot into a buffer
 *
 * @handle:	Stack depot handle which was returned from
 *		stack_depot_save().
 * @buf:	Pointer to the print buffer
 * @size:	Size of the print buffer
 * @spaces:	Number of leading spaces to print
 *
 * Return: Number of bytes printed.
 */
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
			int spaces)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
						spaces) : 0;
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);
unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
{
	union handle_parts parts = { .handle = handle };

	return parts.extra;
}
EXPORT_SYMBOL(stack_depot_get_extra_bits);
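/*
 * Extra-bits round trip (illustrative): a client can stash a small tag in
 * the spare handle bits at save time and read it back cheaply without
 * fetching the entries. MY_TAG is a hypothetical value that must fit in
 * STACK_DEPOT_EXTRA_BITS bits; after the save below, tag == MY_TAG:
 *
 *	handle = __stack_depot_save(entries, nr_entries, MY_TAG,
 *				    GFP_KERNEL, true);
 *	tag = stack_depot_get_extra_bits(handle);
 */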