lib/stackdepot: rename slab to pool
[linux-2.6-block.git] / lib / stackdepot.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since
 * alloc and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so we store them one after
 * another in contiguous memory allocations.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

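For orientation, a minimal usage sketch (the helper name depot_record_event() and the 16-entry buffer size are illustrative, not part of this file): a caller captures a trace with stack_trace_save(), hands it to stack_depot_save(), keeps only the returned 32-bit handle, and can later expand or print it via stack_depot_fetch() or stack_depot_print().

/*
 * Illustrative only: record the current stack and return a compact handle.
 * stack_depot_init() (or an early-init request before mm_init()) must have
 * succeeded, otherwise stack_depot_save() returns 0.
 */
static depot_stack_handle_t depot_record_event(void)
{
        unsigned long entries[16];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        return stack_depot_save(entries, nr_entries, GFP_NOWAIT);
}
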
#define pr_fmt(fmt) "stackdepot: " fmt

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* Pool size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
                                 STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
                                STACK_ALLOC_NULL_PROTECTION_BITS - \
                                STACK_ALLOC_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
#define STACK_ALLOC_POOLS_CAP 8192
#define STACK_ALLOC_MAX_POOLS \
        (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_POOLS_CAP) ? \
         (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_POOLS_CAP)

/* The compact structure to store the reference to stacks. */
union handle_parts {
        depot_stack_handle_t handle;
        struct {
                u32 pool_index : STACK_ALLOC_INDEX_BITS;
                u32 offset : STACK_ALLOC_OFFSET_BITS;
                u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
                u32 extra : STACK_DEPOT_EXTRA_BITS;
        };
};

struct stack_record {
        struct stack_record *next;      /* Link in the hashtable */
        u32 hash;                       /* Hash in the hashtable */
        u32 size;                       /* Number of frames in the stack */
        union handle_parts handle;
        unsigned long entries[];        /* Variable-sized array of entries. */
};

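To make the packing concrete: with 4 KB pages (PAGE_SHIFT == 12) and the order-2 pools above, a pool is 16 KB and records are 16-byte aligned, so an in-pool offset needs 2 + 12 - 4 = 10 bits; the remaining bits of the 32-bit handle are split between the pool index, the valid bit, and the extra bits. A compile-time restatement of that arithmetic (a sketch; the PAGE_SHIFT == 12 assumption is mine, not the file's):

/* Illustrative only: the handle bit budget when PAGE_SHIFT == 12. */
static_assert(DEPOT_STACK_BITS == 32);
static_assert(STACK_ALLOC_OFFSET_BITS == 10);   /* 2 + 12 - 4 */
static_assert(STACK_ALLOC_INDEX_BITS ==
              32 - 1 - 10 - STACK_DEPOT_EXTRA_BITS);
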
static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;

/* Use one hash table bucket per 16 KB of memory. */
#define STACK_HASH_TABLE_SCALE 14
/* Limit the number of buckets between 4K and 1M. */
#define STACK_BUCKET_NUMBER_ORDER_MIN 12
#define STACK_BUCKET_NUMBER_ORDER_MAX 20
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c

/* Hash table of pointers to stored stack traces. */
static struct stack_record **stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;

/* Array of memory regions that store stack traces. */
static void *stack_pools[STACK_ALLOC_MAX_POOLS];
/* Currently used pool in stack_pools. */
static int pool_index;
/* Offset to the unused space in the currently used pool. */
static size_t pool_offset;
/* Lock that protects the variables above. */
static DEFINE_RAW_SPINLOCK(pool_lock);
/* Whether the next pool is initialized. */
static int next_pool_inited;

static int __init disable_stack_depot(char *str)
{
        int ret;

        ret = kstrtobool(str, &stack_depot_disabled);
        if (!ret && stack_depot_disabled) {
                pr_info("disabled\n");
                stack_table = NULL;
        }
        return 0;
}
early_param("stack_depot_disable", disable_stack_depot);

void __init stack_depot_request_early_init(void)
{
        /* Too late to request early init now. */
        WARN_ON(__stack_depot_early_init_passed);

        __stack_depot_early_init_requested = true;
}

/* Allocates a hash table via memblock. Can only be used during early boot. */
int __init stack_depot_early_init(void)
{
        unsigned long entries = 0;

        /* This function must be called only once, from mm_init(). */
        if (WARN_ON(__stack_depot_early_init_passed))
                return 0;
        __stack_depot_early_init_passed = true;

        /*
         * If KASAN is enabled, use the maximum order: KASAN is frequently used
         * in fuzzing scenarios, which leads to a large number of different
         * stack traces being stored in stack depot.
         */
        if (kasan_enabled() && !stack_bucket_number_order)
                stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;

        if (!__stack_depot_early_init_requested || stack_depot_disabled)
                return 0;

        /*
         * If stack_bucket_number_order is not set, leave entries as 0 to rely
         * on the automatic calculations performed by alloc_large_system_hash.
         */
        if (stack_bucket_number_order)
                entries = 1UL << stack_bucket_number_order;
        pr_info("allocating hash table via alloc_large_system_hash\n");
        stack_table = alloc_large_system_hash("stackdepot",
                                              sizeof(struct stack_record *),
                                              entries,
                                              STACK_HASH_TABLE_SCALE,
                                              HASH_EARLY | HASH_ZERO,
                                              NULL,
                                              &stack_hash_mask,
                                              1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
                                              1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
        if (!stack_table) {
                pr_err("hash table allocation failed, disabling\n");
                stack_depot_disabled = true;
                return -ENOMEM;
        }

        return 0;
}

/* Allocates a hash table via kvcalloc. Can be used after boot. */
int stack_depot_init(void)
{
        static DEFINE_MUTEX(stack_depot_init_mutex);
        unsigned long entries;
        int ret = 0;

        mutex_lock(&stack_depot_init_mutex);

        if (stack_depot_disabled || stack_table)
                goto out_unlock;

        /*
         * Similarly to stack_depot_early_init, use stack_bucket_number_order
         * if assigned, and rely on automatic scaling otherwise.
         */
        if (stack_bucket_number_order) {
                entries = 1UL << stack_bucket_number_order;
        } else {
                int scale = STACK_HASH_TABLE_SCALE;

                entries = nr_free_buffer_pages();
                entries = roundup_pow_of_two(entries);

                if (scale > PAGE_SHIFT)
                        entries >>= (scale - PAGE_SHIFT);
                else
                        entries <<= (PAGE_SHIFT - scale);
        }

        if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
                entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
        if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
                entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;

        pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
        stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
        if (!stack_table) {
                pr_err("hash table allocation failed, disabling\n");
                stack_depot_disabled = true;
                ret = -ENOMEM;
                goto out_unlock;
        }
        stack_hash_mask = entries - 1;

out_unlock:
        mutex_unlock(&stack_depot_init_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);

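As a worked example of the automatic sizing above (the machine size is hypothetical): on a 64-bit system with 4 KB pages where nr_free_buffer_pages() returns roughly 1 << 22 pages (~16 GB), roundup_pow_of_two() leaves 1 << 22, and shifting right by STACK_HASH_TABLE_SCALE - PAGE_SHIFT = 2 gives 1 << 20 buckets, which is exactly the STACK_BUCKET_NUMBER_ORDER_MAX clamp: a 1M-entry table costing 8 MB of bucket pointers.
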
static bool init_stack_pool(void **prealloc)
{
        if (!*prealloc)
                return false;
        /*
         * This smp_load_acquire() pairs with smp_store_release() to
         * |next_pool_inited| below and in depot_alloc_stack().
         */
        if (smp_load_acquire(&next_pool_inited))
                return true;
        if (stack_pools[pool_index] == NULL) {
                stack_pools[pool_index] = *prealloc;
                *prealloc = NULL;
        } else {
                /* If this is the last depot pool, do not touch the next one. */
                if (pool_index + 1 < STACK_ALLOC_MAX_POOLS) {
                        stack_pools[pool_index + 1] = *prealloc;
                        *prealloc = NULL;
                }
                /*
                 * This smp_store_release pairs with smp_load_acquire() from
                 * |next_pool_inited| above and in stack_depot_save().
                 */
                smp_store_release(&next_pool_inited, 1);
        }
        return true;
}

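The |next_pool_inited| flag follows the usual release/acquire publication pattern: the writer finishes initializing the data (here, installing the preallocated pool pointer) before the smp_store_release(), so any reader that observes the flag via smp_load_acquire() also observes those earlier writes. A generic sketch of the pattern with hypothetical names, not part of this file:

/* Illustrative only: release/acquire publication of a pointer. */
static void *shared_ptr;
static int shared_ready;

static void publish(void *p)
{
        shared_ptr = p;                         /* initialize first */
        smp_store_release(&shared_ready, 1);    /* then publish */
}

static void *consume(void)
{
        if (!smp_load_acquire(&shared_ready))   /* pairs with the release */
                return NULL;
        return shared_ptr;                      /* pointer is visible here */
}
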
/* Allocation of a new stack in raw storage */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
        struct stack_record *stack;
        size_t required_size = struct_size(stack, entries, size);

        required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

        if (unlikely(pool_offset + required_size > STACK_ALLOC_SIZE)) {
                if (unlikely(pool_index + 1 >= STACK_ALLOC_MAX_POOLS)) {
                        WARN_ONCE(1, "Stack depot reached limit capacity");
                        return NULL;
                }
                pool_index++;
                pool_offset = 0;
                /*
                 * smp_store_release() here pairs with smp_load_acquire() from
                 * |next_pool_inited| in stack_depot_save() and
                 * init_stack_pool().
                 */
                if (pool_index + 1 < STACK_ALLOC_MAX_POOLS)
                        smp_store_release(&next_pool_inited, 0);
        }
        init_stack_pool(prealloc);
        if (stack_pools[pool_index] == NULL)
                return NULL;

        stack = stack_pools[pool_index] + pool_offset;

        stack->hash = hash;
        stack->size = size;
        stack->handle.pool_index = pool_index;
        stack->handle.offset = pool_offset >> STACK_ALLOC_ALIGN;
        stack->handle.valid = 1;
        stack->handle.extra = 0;
        memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
        pool_offset += required_size;

        return stack;
}

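For a sense of scale (assuming a typical 64-bit layout where the fixed part of struct stack_record occupies 24 bytes): a 14-frame trace needs struct_size() = 24 + 14 * 8 = 136 bytes, which ALIGN() rounds up to 144, the next multiple of 1 << STACK_ALLOC_ALIGN = 16. handle.offset stores the record's position divided by that same 16-byte granule, which is why 10 offset bits are enough to address a 16 KB pool.
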
/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
        return jhash2((u32 *)entries,
                      array_size(size, sizeof(*entries)) / sizeof(u32),
                      STACK_HASH_SEED);
}

/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
                      unsigned int n)
{
        for ( ; n-- ; u1++, u2++) {
                if (*u1 != *u2)
                        return 1;
        }
        return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
                                              unsigned long *entries, int size,
                                              u32 hash)
{
        struct stack_record *found;

        for (found = bucket; found; found = found->next) {
                if (found->hash == hash &&
                    found->size == size &&
                    !stackdepot_memcmp(entries, found->entries, size))
                        return found;
        }
        return NULL;
}

/**
 * __stack_depot_save - Save a stack trace from an array
 *
 * @entries:            Pointer to storage array
 * @nr_entries:         Size of the storage array
 * @extra_bits:         Flags to store in unused bits of depot_stack_handle_t
 * @alloc_flags:        Allocation gfp flags
 * @can_alloc:          Allocate stack pools (increased chance of failure if false)
 *
 * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
 * %true, is allowed to replenish the stack pool in case no space is left
 * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
 * any allocations and will fail if no space is left to store the stack trace.
 *
 * If the stack trace in @entries is from an interrupt, only the portion up to
 * interrupt entry is saved.
 *
 * Additional opaque flags can be passed in @extra_bits, stored in the unused
 * bits of the stack handle, and retrieved using stack_depot_get_extra_bits()
 * without calling stack_depot_fetch().
 *
 * Context: Any context, but setting @can_alloc to %false is required if
 *          alloc_pages() cannot be used from the current context. Currently
 *          this is the case from contexts where neither %GFP_ATOMIC nor
 *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
                                        unsigned int nr_entries,
                                        unsigned int extra_bits,
                                        gfp_t alloc_flags, bool can_alloc)
{
        struct stack_record *found = NULL, **bucket;
        union handle_parts retval = { .handle = 0 };
        struct page *page = NULL;
        void *prealloc = NULL;
        unsigned long flags;
        u32 hash;

        /*
         * If this stack trace is from an interrupt, including anything before
         * interrupt entry usually leads to unbounded stackdepot growth.
         *
         * Because use of filter_irq_stacks() is a requirement to ensure
         * stackdepot can efficiently deduplicate interrupt stacks, always
         * filter_irq_stacks() to simplify all callers' use of stackdepot.
         */
        nr_entries = filter_irq_stacks(entries, nr_entries);

        if (unlikely(nr_entries == 0) || stack_depot_disabled)
                goto fast_exit;

        hash = hash_stack(entries, nr_entries);
        bucket = &stack_table[hash & stack_hash_mask];

        /*
         * Fast path: look the stack trace up without locking.
         * The smp_load_acquire() here pairs with smp_store_release() to
         * |bucket| below.
         */
        found = find_stack(smp_load_acquire(bucket), entries,
                           nr_entries, hash);
        if (found)
                goto exit;

        /*
         * Check if the current or the next stack pool needs to be initialized.
         * If so, allocate the memory - we won't be able to do that under the
         * lock.
         *
         * The smp_load_acquire() here pairs with smp_store_release() to
         * |next_pool_inited| in depot_alloc_stack() and init_stack_pool().
         */
        if (unlikely(can_alloc && !smp_load_acquire(&next_pool_inited))) {
                /*
                 * Zero out zone modifiers, as we don't have specific zone
                 * requirements. Keep the flags related to allocation in atomic
                 * contexts and I/O.
                 */
                alloc_flags &= ~GFP_ZONEMASK;
                alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
                alloc_flags |= __GFP_NOWARN;
                page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
                if (page)
                        prealloc = page_address(page);
        }

        raw_spin_lock_irqsave(&pool_lock, flags);

        found = find_stack(*bucket, entries, nr_entries, hash);
        if (!found) {
                struct stack_record *new = depot_alloc_stack(entries, nr_entries, hash, &prealloc);

                if (new) {
                        new->next = *bucket;
                        /*
                         * This smp_store_release() pairs with
                         * smp_load_acquire() from |bucket| above.
                         */
                        smp_store_release(bucket, new);
                        found = new;
                }
        } else if (prealloc) {
                /*
                 * We didn't need to store this stack trace, but let's keep
                 * the preallocated memory for the future.
                 */
                WARN_ON(!init_stack_pool(&prealloc));
        }

        raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
        if (prealloc) {
                /* Nobody used this memory, ok to free it. */
                free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
        }
        if (found)
                retval.handle = found->handle.handle;
fast_exit:
        retval.extra = extra_bits;

        return retval.handle;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);

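A hedged sketch of the @can_alloc distinction (the wrapper name is hypothetical): callers holding a raw spinlock or running in NMI-like contexts must pass %false, in which case no pages are allocated, the GFP flags are never consulted, and saving simply returns 0 when the current pool has no room.

/* Illustrative only: saving from a context that must not allocate. */
static depot_stack_handle_t save_no_alloc(unsigned long *entries,
                                          unsigned int nr_entries)
{
        /* alloc_flags are ignored because can_alloc is false. */
        return __stack_depot_save(entries, nr_entries, 0, GFP_NOWAIT, false);
}
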
/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:            Pointer to storage array
 * @nr_entries:         Size of the storage array
 * @alloc_flags:        Allocation gfp flags
 *
 * Context: Contexts where allocations via alloc_pages() are allowed.
 *          See __stack_depot_save() for more details.
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
                                      unsigned int nr_entries,
                                      gfp_t alloc_flags)
{
        return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:             Stack depot handle which was returned from
 *                      stack_depot_save().
 * @entries:            Pointer to store the entries address
 *
 * Return: The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
                               unsigned long **entries)
{
        union handle_parts parts = { .handle = handle };
        void *pool;
        size_t offset = parts.offset << STACK_ALLOC_ALIGN;
        struct stack_record *stack;

        *entries = NULL;
        if (!handle)
                return 0;

        if (parts.pool_index > pool_index) {
                WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
                     parts.pool_index, pool_index, handle);
                return 0;
        }
        pool = stack_pools[parts.pool_index];
        if (!pool)
                return 0;
        stack = pool + offset;

        *entries = stack->entries;
        return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);

/**
 * stack_depot_print - print stack entries from a depot
 *
 * @stack:              Stack depot handle which was returned from
 *                      stack_depot_save().
 */
void stack_depot_print(depot_stack_handle_t stack)
{
        unsigned long *entries;
        unsigned int nr_entries;

        nr_entries = stack_depot_fetch(stack, &entries);
        if (nr_entries > 0)
                stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);

/**
 * stack_depot_snprint - print stack entries from a depot into a buffer
 *
 * @handle:             Stack depot handle which was returned from
 *                      stack_depot_save().
 * @buf:                Pointer to the print buffer
 * @size:               Size of the print buffer
 * @spaces:             Number of leading spaces to print
 *
 * Return: Number of bytes printed.
 */
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
                        int spaces)
{
        unsigned long *entries;
        unsigned int nr_entries;

        nr_entries = stack_depot_fetch(handle, &entries);
        return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
                                                spaces) : 0;
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);

unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
{
        union handle_parts parts = { .handle = handle };

        return parts.extra;
}
EXPORT_SYMBOL(stack_depot_get_extra_bits);
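
Finally, a sketch of the extra-bits round trip (the wrapper names and the notion of a "tag" are illustrative; the tag must fit into STACK_DEPOT_EXTRA_BITS):

/* Illustrative only: stash a small tag in the handle's spare bits. */
static depot_stack_handle_t save_with_tag(unsigned long *entries,
                                          unsigned int nr_entries, u32 tag)
{
        return __stack_depot_save(entries, nr_entries, tag, GFP_NOWAIT, true);
}

/* Later: recover the tag without touching the stack pools. */
static u32 handle_tag(depot_stack_handle_t handle)
{
        return stack_depot_get_extra_bits(handle);
}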