| 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * SLUB: A slab allocator that limits cache line use instead of queuing |
| 4 | * objects in per cpu and per node lists. |
| 5 | * |
| 6 | * The allocator synchronizes using per slab locks or atomic operations |
| 7 | * and only uses a centralized lock to manage a pool of partial slabs. |
| 8 | * |
| 9 | * (C) 2007 SGI, Christoph Lameter |
| 10 | * (C) 2011 Linux Foundation, Christoph Lameter |
| 11 | */ |
| 12 | |
| 13 | #include <linux/mm.h> |
| 14 | #include <linux/swap.h> /* mm_account_reclaimed_pages() */ |
| 15 | #include <linux/module.h> |
| 16 | #include <linux/bit_spinlock.h> |
| 17 | #include <linux/interrupt.h> |
| 18 | #include <linux/swab.h> |
| 19 | #include <linux/bitops.h> |
| 20 | #include <linux/slab.h> |
| 21 | #include "slab.h" |
| 22 | #include <linux/vmalloc.h> |
| 23 | #include <linux/proc_fs.h> |
| 24 | #include <linux/seq_file.h> |
| 25 | #include <linux/kasan.h> |
| 26 | #include <linux/kmsan.h> |
| 27 | #include <linux/cpu.h> |
| 28 | #include <linux/cpuset.h> |
| 29 | #include <linux/mempolicy.h> |
| 30 | #include <linux/ctype.h> |
| 31 | #include <linux/stackdepot.h> |
| 32 | #include <linux/debugobjects.h> |
| 33 | #include <linux/kallsyms.h> |
| 34 | #include <linux/kfence.h> |
| 35 | #include <linux/memory.h> |
| 36 | #include <linux/math64.h> |
| 37 | #include <linux/fault-inject.h> |
| 38 | #include <linux/kmemleak.h> |
| 39 | #include <linux/stacktrace.h> |
| 40 | #include <linux/prefetch.h> |
| 41 | #include <linux/memcontrol.h> |
| 42 | #include <linux/random.h> |
| 43 | #include <kunit/test.h> |
| 44 | #include <kunit/test-bug.h> |
| 45 | #include <linux/sort.h> |
| 46 | |
| 47 | #include <linux/debugfs.h> |
| 48 | #include <trace/events/kmem.h> |
| 49 | |
| 50 | #include "internal.h" |
| 51 | |
| 52 | /* |
| 53 | * Lock order: |
| 54 | * 1. slab_mutex (Global Mutex) |
| 55 | * 2. node->list_lock (Spinlock) |
| 56 | * 3. kmem_cache->cpu_slab->lock (Local lock) |
| 57 | * 4. slab_lock(slab) (Only on some arches) |
| 58 | * 5. object_map_lock (Only for debugging) |
| 59 | * |
| 60 | * slab_mutex |
| 61 | * |
| 62 | * The role of the slab_mutex is to protect the list of all the slabs |
| 63 | * and to synchronize major metadata changes to slab cache structures. |
| 64 | * Also synchronizes memory hotplug callbacks. |
| 65 | * |
| 66 | * slab_lock |
| 67 | * |
| 68 | * The slab_lock is a wrapper around the page lock, thus it is a bit |
| 69 | * spinlock. |
| 70 | * |
| 71 | * The slab_lock is only used on arches that do not have the ability |
| 72 | * to do a cmpxchg_double. It only protects: |
| 73 | * |
| 74 | * A. slab->freelist -> List of free objects in a slab |
| 75 | * B. slab->inuse -> Number of objects in use |
| 76 | * C. slab->objects -> Number of objects in slab |
| 77 | * D. slab->frozen -> frozen state |
| 78 | * |
| 79 | * Frozen slabs |
| 80 | * |
| 81 | * If a slab is frozen then it is exempt from list management. It is |
| 82 |  * the cpu slab that the processor which froze it actively allocates |
| 83 |  * from, and it is not on any list. The processor that froze the |
| 84 |  * slab is the only one that can perform list operations on the slab. Other |
| 85 | * processors may put objects onto the freelist but the processor that |
| 86 | * froze the slab is the only one that can retrieve the objects from the |
| 87 | * slab's freelist. |
| 88 | * |
| 89 | * CPU partial slabs |
| 90 | * |
| 91 | * The partially empty slabs cached on the CPU partial list are used |
| 92 |  * for performance reasons, to speed up the allocation process. |
| 93 | * These slabs are not frozen, but are also exempt from list management, |
| 94 | * by clearing the PG_workingset flag when moving out of the node |
| 95 | * partial list. Please see __slab_free() for more details. |
| 96 | * |
| 97 | * To sum up, the current scheme is: |
| 98 |  * - node partial slab: PG_workingset && !frozen |
| 99 |  * - cpu partial slab: !PG_workingset && !frozen |
| 100 |  * - cpu slab: !PG_workingset && frozen |
| 101 |  * - full slab: !PG_workingset && !frozen |
| 102 | * |
| 103 | * list_lock |
| 104 | * |
| 105 | * The list_lock protects the partial and full list on each node and |
| 106 |  * the partial slab counter. While it is held, no slabs may be added to or |
| 107 |  * removed from the lists, nor may the number of partial slabs be modified. |
| 108 | * (Note that the total number of slabs is an atomic value that may be |
| 109 | * modified without taking the list lock). |
| 110 | * |
| 111 | * The list_lock is a centralized lock and thus we avoid taking it as |
| 112 | * much as possible. As long as SLUB does not have to handle partial |
| 113 | * slabs, operations can continue without any centralized lock. F.e. |
| 114 | * allocating a long series of objects that fill up slabs does not require |
| 115 | * the list lock. |
| 116 | * |
| 117 | * For debug caches, all allocations are forced to go through a list_lock |
| 118 | * protected region to serialize against concurrent validation. |
| 119 | * |
| 120 | * cpu_slab->lock local lock |
| 121 | * |
| 122 |  * This lock protects slowpath manipulation of all kmem_cache_cpu fields |
| 123 | * except the stat counters. This is a percpu structure manipulated only by |
| 124 | * the local cpu, so the lock protects against being preempted or interrupted |
| 125 | * by an irq. Fast path operations rely on lockless operations instead. |
| 126 | * |
| 127 | * On PREEMPT_RT, the local lock neither disables interrupts nor preemption |
| 128 | * which means the lockless fastpath cannot be used as it might interfere with |
| 129 |  * an in-progress slow path operation. In this case the local lock is always |
| 130 | * taken but it still utilizes the freelist for the common operations. |
| 131 | * |
| 132 | * lockless fastpaths |
| 133 | * |
| 134 | * The fast path allocation (slab_alloc_node()) and freeing (do_slab_free()) |
| 135 | * are fully lockless when satisfied from the percpu slab (and when |
| 136 | * cmpxchg_double is possible to use, otherwise slab_lock is taken). |
| 137 | * They also don't disable preemption or migration or irqs. They rely on |
| 138 | * the transaction id (tid) field to detect being preempted or moved to |
| 139 | * another cpu. |
| 140 | * |
| 141 | * irq, preemption, migration considerations |
| 142 | * |
| 143 | * Interrupts are disabled as part of list_lock or local_lock operations, or |
| 144 | * around the slab_lock operation, in order to make the slab allocator safe |
| 145 | * to use in the context of an irq. |
| 146 | * |
| 147 | * In addition, preemption (or migration on PREEMPT_RT) is disabled in the |
| 148 | * allocation slowpath, bulk allocation, and put_cpu_partial(), so that the |
| 149 | * local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer |
| 150 | * doesn't have to be revalidated in each section protected by the local lock. |
| 151 | * |
| 152 | * SLUB assigns one slab for allocation to each processor. |
| 153 | * Allocations only occur from these slabs called cpu slabs. |
| 154 | * |
| 155 | * Slabs with free elements are kept on a partial list and during regular |
| 156 | * operations no list for full slabs is used. If an object in a full slab is |
| 157 | * freed then the slab will show up again on the partial lists. |
| 158 | * We track full slabs for debugging purposes though because otherwise we |
| 159 | * cannot scan all objects. |
| 160 | * |
| 161 | * Slabs are freed when they become empty. Teardown and setup is |
| 162 |  * minimal, so we rely on the page allocator's per-cpu caches for |
| 163 | * fast frees and allocs. |
| 164 | * |
| 165 | * slab->frozen The slab is frozen and exempt from list processing. |
| 166 | * This means that the slab is dedicated to a purpose |
| 167 | * such as satisfying allocations for a specific |
| 168 | * processor. Objects may be freed in the slab while |
| 169 | * it is frozen but slab_free will then skip the usual |
| 170 | * list operations. It is up to the processor holding |
| 171 | * the slab to integrate the slab into the slab lists |
| 172 | * when the slab is no longer needed. |
| 173 | * |
| 174 | * One use of this flag is to mark slabs that are |
| 175 | * used for allocations. Then such a slab becomes a cpu |
| 176 | * slab. The cpu slab may be equipped with an additional |
| 177 | * freelist that allows lockless access to |
| 178 | * free objects in addition to the regular freelist |
| 179 | * that requires the slab lock. |
| 180 | * |
| 181 | * SLAB_DEBUG_FLAGS Slab requires special handling due to debug |
| 182 | * options set. This moves slab handling out of |
| 183 | * the fast path and disables lockless freelists. |
| 184 | */ |
| 185 | |
| 186 | /* |
| 187 |  * We could simply use migrate_disable()/enable(), but since that is a |
| 188 |  * function call even on !PREEMPT_RT, use the cheaper inline preempt_disable() there. |
| 189 | */ |
| 190 | #ifndef CONFIG_PREEMPT_RT |
| 191 | #define slub_get_cpu_ptr(var) get_cpu_ptr(var) |
| 192 | #define slub_put_cpu_ptr(var) put_cpu_ptr(var) |
| 193 | #define USE_LOCKLESS_FAST_PATH() (true) |
| 194 | #else |
| 195 | #define slub_get_cpu_ptr(var) \ |
| 196 | ({ \ |
| 197 | migrate_disable(); \ |
| 198 | this_cpu_ptr(var); \ |
| 199 | }) |
| 200 | #define slub_put_cpu_ptr(var) \ |
| 201 | do { \ |
| 202 | (void)(var); \ |
| 203 | migrate_enable(); \ |
| 204 | } while (0) |
| 205 | #define USE_LOCKLESS_FAST_PATH() (false) |
| 206 | #endif |
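|  | |
|  | /* |
|  |  * Illustrative usage sketch (not a specific call site): the pair pins the |
|  |  * current CPU (or only disables migration on PREEMPT_RT) so that a per-cpu |
|  |  * pointer obtained in between keeps referring to the local CPU's data: |
|  |  * |
|  |  *	struct kmem_cache_cpu *c; |
|  |  * |
|  |  *	c = slub_get_cpu_ptr(s->cpu_slab); |
|  |  *	... operate on *c ... |
|  |  *	slub_put_cpu_ptr(s->cpu_slab); |
|  |  */ |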
| 207 | |
| 208 | #ifndef CONFIG_SLUB_TINY |
| 209 | #define __fastpath_inline __always_inline |
| 210 | #else |
| 211 | #define __fastpath_inline |
| 212 | #endif |
| 213 | |
| 214 | #ifdef CONFIG_SLUB_DEBUG |
| 215 | #ifdef CONFIG_SLUB_DEBUG_ON |
| 216 | DEFINE_STATIC_KEY_TRUE(slub_debug_enabled); |
| 217 | #else |
| 218 | DEFINE_STATIC_KEY_FALSE(slub_debug_enabled); |
| 219 | #endif |
| 220 | #endif /* CONFIG_SLUB_DEBUG */ |
| 221 | |
| 222 | #ifdef CONFIG_NUMA |
| 223 | static DEFINE_STATIC_KEY_FALSE(strict_numa); |
| 224 | #endif |
| 225 | |
| 226 | /* Structure holding parameters for get_partial() call chain */ |
| 227 | struct partial_context { |
| 228 | gfp_t flags; |
| 229 | unsigned int orig_size; |
| 230 | void *object; |
| 231 | }; |
| 232 | |
| 233 | static inline bool kmem_cache_debug(struct kmem_cache *s) |
| 234 | { |
| 235 | return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS); |
| 236 | } |
| 237 | |
| 238 | void *fixup_red_left(struct kmem_cache *s, void *p) |
| 239 | { |
| 240 | if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) |
| 241 | p += s->red_left_pad; |
| 242 | |
| 243 | return p; |
| 244 | } |
| 245 | |
| 246 | static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) |
| 247 | { |
| 248 | #ifdef CONFIG_SLUB_CPU_PARTIAL |
| 249 | return !kmem_cache_debug(s); |
| 250 | #else |
| 251 | return false; |
| 252 | #endif |
| 253 | } |
| 254 | |
| 255 | /* |
| 256 | * Issues still to be resolved: |
| 257 | * |
| 258 | * - Support PAGE_ALLOC_DEBUG. Should be easy to do. |
| 259 | * |
| 260 | * - Variable sizing of the per node arrays |
| 261 | */ |
| 262 | |
| 263 | /* Enable to log cmpxchg failures */ |
| 264 | #undef SLUB_DEBUG_CMPXCHG |
| 265 | |
| 266 | #ifndef CONFIG_SLUB_TINY |
| 267 | /* |
| 268 | * Minimum number of partial slabs. These will be left on the partial |
| 269 | * lists even if they are empty. kmem_cache_shrink may reclaim them. |
| 270 | */ |
| 271 | #define MIN_PARTIAL 5 |
| 272 | |
| 273 | /* |
| 274 | * Maximum number of desirable partial slabs. |
| 275 | * The existence of more partial slabs makes kmem_cache_shrink |
| 276 | * sort the partial list by the number of objects in use. |
| 277 | */ |
| 278 | #define MAX_PARTIAL 10 |
| 279 | #else |
| 280 | #define MIN_PARTIAL 0 |
| 281 | #define MAX_PARTIAL 0 |
| 282 | #endif |
| 283 | |
| 284 | #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \ |
| 285 | SLAB_POISON | SLAB_STORE_USER) |
| 286 | |
| 287 | /* |
| 288 | * These debug flags cannot use CMPXCHG because there might be consistency |
| 289 |  * issues when checking or reading debug information. |
| 290 | */ |
| 291 | #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \ |
| 292 | SLAB_TRACE) |
| 293 | |
| 294 | |
| 295 | /* |
| 296 | * Debugging flags that require metadata to be stored in the slab. These get |
| 297 | * disabled when slab_debug=O is used and a cache's min order increases with |
| 298 | * metadata. |
| 299 | */ |
| 300 | #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) |
| 301 | |
| 302 | #define OO_SHIFT 16 |
| 303 | #define OO_MASK ((1 << OO_SHIFT) - 1) |
| 304 | #define MAX_OBJS_PER_PAGE 32767 /* since slab.objects is u15 */ |
| 305 | |
| 306 | /* Internal SLUB flags */ |
| 307 | /* Poison object */ |
| 308 | #define __OBJECT_POISON __SLAB_FLAG_BIT(_SLAB_OBJECT_POISON) |
| 309 | /* Use cmpxchg_double */ |
| 310 | |
| 311 | #ifdef system_has_freelist_aba |
| 312 | #define __CMPXCHG_DOUBLE __SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE) |
| 313 | #else |
| 314 | #define __CMPXCHG_DOUBLE __SLAB_FLAG_UNUSED |
| 315 | #endif |
| 316 | |
| 317 | /* |
| 318 | * Tracking user of a slab. |
| 319 | */ |
| 320 | #define TRACK_ADDRS_COUNT 16 |
| 321 | struct track { |
| 322 | unsigned long addr; /* Called from address */ |
| 323 | #ifdef CONFIG_STACKDEPOT |
| 324 | depot_stack_handle_t handle; |
| 325 | #endif |
| 326 | int cpu; /* Was running on cpu */ |
| 327 | int pid; /* Pid context */ |
| 328 | unsigned long when; /* When did the operation occur */ |
| 329 | }; |
| 330 | |
| 331 | enum track_item { TRACK_ALLOC, TRACK_FREE }; |
| 332 | |
| 333 | #ifdef SLAB_SUPPORTS_SYSFS |
| 334 | static int sysfs_slab_add(struct kmem_cache *); |
| 335 | static int sysfs_slab_alias(struct kmem_cache *, const char *); |
| 336 | #else |
| 337 | static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } |
| 338 | static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) |
| 339 | { return 0; } |
| 340 | #endif |
| 341 | |
| 342 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG) |
| 343 | static void debugfs_slab_add(struct kmem_cache *); |
| 344 | #else |
| 345 | static inline void debugfs_slab_add(struct kmem_cache *s) { } |
| 346 | #endif |
| 347 | |
| 348 | enum stat_item { |
| 349 | ALLOC_FASTPATH, /* Allocation from cpu slab */ |
| 350 | ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ |
| 351 | FREE_FASTPATH, /* Free to cpu slab */ |
| 352 | FREE_SLOWPATH, /* Freeing not to cpu slab */ |
| 353 | FREE_FROZEN, /* Freeing to frozen slab */ |
| 354 | FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */ |
| 355 | FREE_REMOVE_PARTIAL, /* Freeing removes last object */ |
| 356 | ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */ |
| 357 | ALLOC_SLAB, /* Cpu slab acquired from page allocator */ |
| 358 | ALLOC_REFILL, /* Refill cpu slab from slab freelist */ |
| 359 | ALLOC_NODE_MISMATCH, /* Switching cpu slab */ |
| 360 | FREE_SLAB, /* Slab freed to the page allocator */ |
| 361 | CPUSLAB_FLUSH, /* Abandoning of the cpu slab */ |
| 362 | DEACTIVATE_FULL, /* Cpu slab was full when deactivated */ |
| 363 | DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */ |
| 364 | DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */ |
| 365 | DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */ |
| 366 | DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */ |
| 367 | DEACTIVATE_BYPASS, /* Implicit deactivation */ |
| 368 | ORDER_FALLBACK, /* Number of times fallback was necessary */ |
| 369 | CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */ |
| 370 | CMPXCHG_DOUBLE_FAIL, /* Failures of slab freelist update */ |
| 371 | CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */ |
| 372 | CPU_PARTIAL_FREE, /* Refill cpu partial on free */ |
| 373 | CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */ |
| 374 | CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */ |
| 375 | NR_SLUB_STAT_ITEMS |
| 376 | }; |
| 377 | |
| 378 | #ifndef CONFIG_SLUB_TINY |
| 379 | /* |
| 380 | * When changing the layout, make sure freelist and tid are still compatible |
| 381 | * with this_cpu_cmpxchg_double() alignment requirements. |
| 382 | */ |
| 383 | struct kmem_cache_cpu { |
| 384 | union { |
| 385 | struct { |
| 386 | void **freelist; /* Pointer to next available object */ |
| 387 | unsigned long tid; /* Globally unique transaction id */ |
| 388 | }; |
| 389 | freelist_aba_t freelist_tid; |
| 390 | }; |
| 391 | struct slab *slab; /* The slab from which we are allocating */ |
| 392 | #ifdef CONFIG_SLUB_CPU_PARTIAL |
| 393 | struct slab *partial; /* Partially allocated slabs */ |
| 394 | #endif |
| 395 | local_lock_t lock; /* Protects the fields above */ |
| 396 | #ifdef CONFIG_SLUB_STATS |
| 397 | unsigned int stat[NR_SLUB_STAT_ITEMS]; |
| 398 | #endif |
| 399 | }; |
| 400 | #endif /* CONFIG_SLUB_TINY */ |
| 401 | |
| 402 | static inline void stat(const struct kmem_cache *s, enum stat_item si) |
| 403 | { |
| 404 | #ifdef CONFIG_SLUB_STATS |
| 405 | /* |
| 406 | * The rmw is racy on a preemptible kernel but this is acceptable, so |
| 407 | * avoid this_cpu_add()'s irq-disable overhead. |
| 408 | */ |
| 409 | raw_cpu_inc(s->cpu_slab->stat[si]); |
| 410 | #endif |
| 411 | } |
| 412 | |
| 413 | static inline |
| 414 | void stat_add(const struct kmem_cache *s, enum stat_item si, int v) |
| 415 | { |
| 416 | #ifdef CONFIG_SLUB_STATS |
| 417 | raw_cpu_add(s->cpu_slab->stat[si], v); |
| 418 | #endif |
| 419 | } |
| 420 | |
| 421 | /* |
| 422 | * The slab lists for all objects. |
| 423 | */ |
| 424 | struct kmem_cache_node { |
| 425 | spinlock_t list_lock; |
| 426 | unsigned long nr_partial; |
| 427 | struct list_head partial; |
| 428 | #ifdef CONFIG_SLUB_DEBUG |
| 429 | atomic_long_t nr_slabs; |
| 430 | atomic_long_t total_objects; |
| 431 | struct list_head full; |
| 432 | #endif |
| 433 | }; |
| 434 | |
| 435 | static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) |
| 436 | { |
| 437 | return s->node[node]; |
| 438 | } |
| 439 | |
| 440 | /* |
| 441 | * Iterator over all nodes. The body will be executed for each node that has |
| 442 | * a kmem_cache_node structure allocated (which is true for all online nodes) |
| 443 | */ |
| 444 | #define for_each_kmem_cache_node(__s, __node, __n) \ |
| 445 | for (__node = 0; __node < nr_node_ids; __node++) \ |
| 446 | if ((__n = get_node(__s, __node))) |
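|  | |
|  | /* |
|  |  * Example (sketch only) of walking every node of a cache: |
|  |  * |
|  |  *	struct kmem_cache_node *n; |
|  |  *	int node; |
|  |  * |
|  |  *	for_each_kmem_cache_node(s, node, n) |
|  |  *		pr_info("node %d: %lu partial slabs\n", node, n->nr_partial); |
|  |  */ |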
| 447 | |
| 448 | /* |
| 449 | * Tracks for which NUMA nodes we have kmem_cache_nodes allocated. |
| 450 | * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily |
| 451 | * differ during memory hotplug/hotremove operations. |
| 452 | * Protected by slab_mutex. |
| 453 | */ |
| 454 | static nodemask_t slab_nodes; |
| 455 | |
| 456 | #ifndef CONFIG_SLUB_TINY |
| 457 | /* |
| 458 | * Workqueue used for flush_cpu_slab(). |
| 459 | */ |
| 460 | static struct workqueue_struct *flushwq; |
| 461 | #endif |
| 462 | |
| 463 | /******************************************************************** |
| 464 | * Core slab cache functions |
| 465 | *******************************************************************/ |
| 466 | |
| 467 | /* |
| 468 | * Returns freelist pointer (ptr). With hardening, this is obfuscated |
| 469 | * with an XOR of the address where the pointer is held and a per-cache |
| 470 | * random number. |
| 471 | */ |
| 472 | static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s, |
| 473 | void *ptr, unsigned long ptr_addr) |
| 474 | { |
| 475 | unsigned long encoded; |
| 476 | |
| 477 | #ifdef CONFIG_SLAB_FREELIST_HARDENED |
| 478 | encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr); |
| 479 | #else |
| 480 | encoded = (unsigned long)ptr; |
| 481 | #endif |
| 482 | return (freeptr_t){.v = encoded}; |
| 483 | } |
| 484 | |
| 485 | static inline void *freelist_ptr_decode(const struct kmem_cache *s, |
| 486 | freeptr_t ptr, unsigned long ptr_addr) |
| 487 | { |
| 488 | void *decoded; |
| 489 | |
| 490 | #ifdef CONFIG_SLAB_FREELIST_HARDENED |
| 491 | decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr)); |
| 492 | #else |
| 493 | decoded = (void *)ptr.v; |
| 494 | #endif |
| 495 | return decoded; |
| 496 | } |
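|  | |
|  | /* |
|  |  * Decoding is the exact inverse of encoding for the same ptr_addr; in both |
|  |  * configurations (illustrative identity, not an actual call site): |
|  |  * |
|  |  *	freelist_ptr_decode(s, freelist_ptr_encode(s, p, addr), addr) == p |
|  |  * |
|  |  * because with hardening the double XOR with s->random and swab(addr) |
|  |  * cancels out, and without hardening both are plain casts. |
|  |  */ |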
| 497 | |
| 498 | static inline void *get_freepointer(struct kmem_cache *s, void *object) |
| 499 | { |
| 500 | unsigned long ptr_addr; |
| 501 | freeptr_t p; |
| 502 | |
| 503 | object = kasan_reset_tag(object); |
| 504 | ptr_addr = (unsigned long)object + s->offset; |
| 505 | p = *(freeptr_t *)(ptr_addr); |
| 506 | return freelist_ptr_decode(s, p, ptr_addr); |
| 507 | } |
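|  | |
|  | /* |
|  |  * Freelist walk sketch (the pattern used by e.g. __fill_map()): follow the |
|  |  * per-object links from slab->freelist until NULL terminates the chain: |
|  |  * |
|  |  *	for (p = slab->freelist; p; p = get_freepointer(s, p)) |
|  |  *		... p visits every free object in the slab ... |
|  |  */ |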
| 508 | |
| 509 | #ifndef CONFIG_SLUB_TINY |
| 510 | static void prefetch_freepointer(const struct kmem_cache *s, void *object) |
| 511 | { |
| 512 | prefetchw(object + s->offset); |
| 513 | } |
| 514 | #endif |
| 515 | |
| 516 | /* |
| 517 | * When running under KMSAN, get_freepointer_safe() may return an uninitialized |
| 518 | * pointer value in the case the current thread loses the race for the next |
| 519 | * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in |
| 520 | * slab_alloc_node() will fail, so the uninitialized value won't be used, but |
| 521 | * KMSAN will still check all arguments of cmpxchg because of imperfect |
| 522 | * handling of inline assembly. |
| 523 | * To work around this problem, we apply __no_kmsan_checks to ensure that |
| 524 | * get_freepointer_safe() returns initialized memory. |
| 525 | */ |
| 526 | __no_kmsan_checks |
| 527 | static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) |
| 528 | { |
| 529 | unsigned long freepointer_addr; |
| 530 | freeptr_t p; |
| 531 | |
| 532 | if (!debug_pagealloc_enabled_static()) |
| 533 | return get_freepointer(s, object); |
| 534 | |
| 535 | object = kasan_reset_tag(object); |
| 536 | freepointer_addr = (unsigned long)object + s->offset; |
| 537 | copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p)); |
| 538 | return freelist_ptr_decode(s, p, freepointer_addr); |
| 539 | } |
| 540 | |
| 541 | static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) |
| 542 | { |
| 543 | unsigned long freeptr_addr = (unsigned long)object + s->offset; |
| 544 | |
| 545 | #ifdef CONFIG_SLAB_FREELIST_HARDENED |
| 546 | BUG_ON(object == fp); /* naive detection of double free or corruption */ |
| 547 | #endif |
| 548 | |
| 549 | freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr); |
| 550 | *(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr); |
| 551 | } |
| 552 | |
| 553 | /* |
| 554 | * See comment in calculate_sizes(). |
| 555 | */ |
| 556 | static inline bool freeptr_outside_object(struct kmem_cache *s) |
| 557 | { |
| 558 | return s->offset >= s->inuse; |
| 559 | } |
| 560 | |
| 561 | /* |
| 562 |  * Return the offset of the end of the info block: inuse plus the size of the |
| 563 |  * free pointer if the free pointer does not overlap the object. |
| 564 | */ |
| 565 | static inline unsigned int get_info_end(struct kmem_cache *s) |
| 566 | { |
| 567 | if (freeptr_outside_object(s)) |
| 568 | return s->inuse + sizeof(void *); |
| 569 | else |
| 570 | return s->inuse; |
| 571 | } |
| 572 | |
| 573 | /* Loop over all objects in a slab */ |
| 574 | #define for_each_object(__p, __s, __addr, __objects) \ |
| 575 | for (__p = fixup_red_left(__s, __addr); \ |
| 576 | __p < (__addr) + (__objects) * (__s)->size; \ |
| 577 | __p += (__s)->size) |
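|  | |
|  | /* |
|  |  * Typical use (illustrative sketch): visit every object slot in a slab, |
|  |  * starting from the redzone-adjusted base address: |
|  |  * |
|  |  *	void *addr = slab_address(slab); |
|  |  *	void *p; |
|  |  * |
|  |  *	for_each_object(p, s, addr, slab->objects) |
|  |  *		... inspect the object at p ... |
|  |  */ |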
| 578 | |
| 579 | static inline unsigned int order_objects(unsigned int order, unsigned int size) |
| 580 | { |
| 581 | return ((unsigned int)PAGE_SIZE << order) / size; |
| 582 | } |
| 583 | |
| 584 | static inline struct kmem_cache_order_objects oo_make(unsigned int order, |
| 585 | unsigned int size) |
| 586 | { |
| 587 | struct kmem_cache_order_objects x = { |
| 588 | (order << OO_SHIFT) + order_objects(order, size) |
| 589 | }; |
| 590 | |
| 591 | return x; |
| 592 | } |
| 593 | |
| 594 | static inline unsigned int oo_order(struct kmem_cache_order_objects x) |
| 595 | { |
| 596 | return x.x >> OO_SHIFT; |
| 597 | } |
| 598 | |
| 599 | static inline unsigned int oo_objects(struct kmem_cache_order_objects x) |
| 600 | { |
| 601 | return x.x & OO_MASK; |
| 602 | } |
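|  | |
|  | /* |
|  |  * Worked example (illustrative numbers): with a 4K PAGE_SIZE, order 1 |
|  |  * (two pages, 8192 bytes) and 256-byte objects, order_objects() yields 32, |
|  |  * so oo_make(1, 256) packs (1 << 16) + 32 = 65568; oo_order() then returns |
|  |  * 1 and oo_objects() returns 32. |
|  |  */ |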
| 603 | |
| 604 | #ifdef CONFIG_SLUB_CPU_PARTIAL |
| 605 | static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects) |
| 606 | { |
| 607 | unsigned int nr_slabs; |
| 608 | |
| 609 | s->cpu_partial = nr_objects; |
| 610 | |
| 611 | /* |
| 612 | * We take the number of objects but actually limit the number of |
| 613 | * slabs on the per cpu partial list, in order to limit excessive |
| 614 | * growth of the list. For simplicity we assume that the slabs will |
| 615 | * be half-full. |
| 616 | */ |
| 617 | nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo)); |
| 618 | s->cpu_partial_slabs = nr_slabs; |
| 619 | } |
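|  | |
|  | /* |
|  |  * Worked example (illustrative): for nr_objects = 30 and oo_objects(s->oo) |
|  |  * = 16, the limit becomes DIV_ROUND_UP(30 * 2, 16) = 4 slabs, i.e. the |
|  |  * half-full assumption converts the object budget into a slab count. |
|  |  */ |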
| 620 | |
| 621 | static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s) |
| 622 | { |
| 623 | return s->cpu_partial_slabs; |
| 624 | } |
| 625 | #else |
| 626 | static inline void |
| 627 | slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects) |
| 628 | { |
| 629 | } |
| 630 | |
| 631 | static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s) |
| 632 | { |
| 633 | return 0; |
| 634 | } |
| 635 | #endif /* CONFIG_SLUB_CPU_PARTIAL */ |
| 636 | |
| 637 | /* |
| 638 | * Per slab locking using the pagelock |
| 639 | */ |
| 640 | static __always_inline void slab_lock(struct slab *slab) |
| 641 | { |
| 642 | bit_spin_lock(PG_locked, &slab->__page_flags); |
| 643 | } |
| 644 | |
| 645 | static __always_inline void slab_unlock(struct slab *slab) |
| 646 | { |
| 647 | bit_spin_unlock(PG_locked, &slab->__page_flags); |
| 648 | } |
| 649 | |
| 650 | static inline bool |
| 651 | __update_freelist_fast(struct slab *slab, |
| 652 | void *freelist_old, unsigned long counters_old, |
| 653 | void *freelist_new, unsigned long counters_new) |
| 654 | { |
| 655 | #ifdef system_has_freelist_aba |
| 656 | freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old }; |
| 657 | freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new }; |
| 658 | |
| 659 | return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full); |
| 660 | #else |
| 661 | return false; |
| 662 | #endif |
| 663 | } |
| 664 | |
| 665 | static inline bool |
| 666 | __update_freelist_slow(struct slab *slab, |
| 667 | void *freelist_old, unsigned long counters_old, |
| 668 | void *freelist_new, unsigned long counters_new) |
| 669 | { |
| 670 | bool ret = false; |
| 671 | |
| 672 | slab_lock(slab); |
| 673 | if (slab->freelist == freelist_old && |
| 674 | slab->counters == counters_old) { |
| 675 | slab->freelist = freelist_new; |
| 676 | slab->counters = counters_new; |
| 677 | ret = true; |
| 678 | } |
| 679 | slab_unlock(slab); |
| 680 | |
| 681 | return ret; |
| 682 | } |
| 683 | |
| 684 | /* |
| 685 | * Interrupts must be disabled (for the fallback code to work right), typically |
| 686 | * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is |
| 687 | * part of bit_spin_lock(), is sufficient because the policy is not to allow any |
| 688 |  * allocation/free operation in hardirq context. Therefore nothing can |
| 689 | * interrupt the operation. |
| 690 | */ |
| 691 | static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab, |
| 692 | void *freelist_old, unsigned long counters_old, |
| 693 | void *freelist_new, unsigned long counters_new, |
| 694 | const char *n) |
| 695 | { |
| 696 | bool ret; |
| 697 | |
| 698 | if (USE_LOCKLESS_FAST_PATH()) |
| 699 | lockdep_assert_irqs_disabled(); |
| 700 | |
| 701 | if (s->flags & __CMPXCHG_DOUBLE) { |
| 702 | ret = __update_freelist_fast(slab, freelist_old, counters_old, |
| 703 | freelist_new, counters_new); |
| 704 | } else { |
| 705 | ret = __update_freelist_slow(slab, freelist_old, counters_old, |
| 706 | freelist_new, counters_new); |
| 707 | } |
| 708 | if (likely(ret)) |
| 709 | return true; |
| 710 | |
| 711 | cpu_relax(); |
| 712 | stat(s, CMPXCHG_DOUBLE_FAIL); |
| 713 | |
| 714 | #ifdef SLUB_DEBUG_CMPXCHG |
| 715 | pr_info("%s %s: cmpxchg double redo ", n, s->name); |
| 716 | #endif |
| 717 | |
| 718 | return false; |
| 719 | } |
| 720 | |
| 721 | static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab, |
| 722 | void *freelist_old, unsigned long counters_old, |
| 723 | void *freelist_new, unsigned long counters_new, |
| 724 | const char *n) |
| 725 | { |
| 726 | bool ret; |
| 727 | |
| 728 | if (s->flags & __CMPXCHG_DOUBLE) { |
| 729 | ret = __update_freelist_fast(slab, freelist_old, counters_old, |
| 730 | freelist_new, counters_new); |
| 731 | } else { |
| 732 | unsigned long flags; |
| 733 | |
| 734 | local_irq_save(flags); |
| 735 | ret = __update_freelist_slow(slab, freelist_old, counters_old, |
| 736 | freelist_new, counters_new); |
| 737 | local_irq_restore(flags); |
| 738 | } |
| 739 | if (likely(ret)) |
| 740 | return true; |
| 741 | |
| 742 | cpu_relax(); |
| 743 | stat(s, CMPXCHG_DOUBLE_FAIL); |
| 744 | |
| 745 | #ifdef SLUB_DEBUG_CMPXCHG |
| 746 | pr_info("%s %s: cmpxchg double redo ", n, s->name); |
| 747 | #endif |
| 748 | |
| 749 | return false; |
| 750 | } |
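|  | |
|  | /* |
|  |  * Callers typically retry the whole read-modify-write cycle when the update |
|  |  * fails. A minimal sketch of that pattern (not an actual call site): |
|  |  * |
|  |  *	do { |
|  |  *		old_freelist = slab->freelist; |
|  |  *		old_counters = slab->counters; |
|  |  *		new_counters = old_counters; |
|  |  *		... compute new_freelist and adjust new_counters ... |
|  |  *	} while (!slab_update_freelist(s, slab, old_freelist, old_counters, |
|  |  *				       new_freelist, new_counters, "example")); |
|  |  */ |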
| 751 | |
| 752 | /* |
| 753 |  * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc() API |
| 754 |  * family will round up the real request size to these fixed ones, so |
| 755 |  * the allocated area may be larger than what was requested. Save the original |
| 756 |  * request size in the metadata area, for better debugging and sanity checks. |
| 757 | */ |
| 758 | static inline void set_orig_size(struct kmem_cache *s, |
| 759 | void *object, unsigned int orig_size) |
| 760 | { |
| 761 | void *p = kasan_reset_tag(object); |
| 762 | |
| 763 | if (!slub_debug_orig_size(s)) |
| 764 | return; |
| 765 | |
| 766 | p += get_info_end(s); |
| 767 | p += sizeof(struct track) * 2; |
| 768 | |
| 769 | *(unsigned int *)p = orig_size; |
| 770 | } |
| 771 | |
| 772 | static inline unsigned int get_orig_size(struct kmem_cache *s, void *object) |
| 773 | { |
| 774 | void *p = kasan_reset_tag(object); |
| 775 | |
| 776 | if (is_kfence_address(object)) |
| 777 | return kfence_ksize(object); |
| 778 | |
| 779 | if (!slub_debug_orig_size(s)) |
| 780 | return s->object_size; |
| 781 | |
| 782 | p += get_info_end(s); |
| 783 | p += sizeof(struct track) * 2; |
| 784 | |
| 785 | return *(unsigned int *)p; |
| 786 | } |
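|  | |
|  | /* |
|  |  * Example (illustrative): kmalloc(52) may be served from a cache with |
|  |  * object_size 64. With slub_debug_orig_size() enabled, 52 is recorded after |
|  |  * the two struct track entries, so the kmalloc redzone check can treat the |
|  |  * 12 rounded-up bytes separately from the 52 actually requested. |
|  |  */ |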
| 787 | |
| 788 | #ifdef CONFIG_SLUB_DEBUG |
| 789 | static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)]; |
| 790 | static DEFINE_SPINLOCK(object_map_lock); |
| 791 | |
| 792 | static void __fill_map(unsigned long *obj_map, struct kmem_cache *s, |
| 793 | struct slab *slab) |
| 794 | { |
| 795 | void *addr = slab_address(slab); |
| 796 | void *p; |
| 797 | |
| 798 | bitmap_zero(obj_map, slab->objects); |
| 799 | |
| 800 | for (p = slab->freelist; p; p = get_freepointer(s, p)) |
| 801 | set_bit(__obj_to_index(s, addr, p), obj_map); |
| 802 | } |
| 803 | |
| 804 | #if IS_ENABLED(CONFIG_KUNIT) |
| 805 | static bool slab_add_kunit_errors(void) |
| 806 | { |
| 807 | struct kunit_resource *resource; |
| 808 | |
| 809 | if (!kunit_get_current_test()) |
| 810 | return false; |
| 811 | |
| 812 | resource = kunit_find_named_resource(current->kunit_test, "slab_errors"); |
| 813 | if (!resource) |
| 814 | return false; |
| 815 | |
| 816 | (*(int *)resource->data)++; |
| 817 | kunit_put_resource(resource); |
| 818 | return true; |
| 819 | } |
| 820 | |
| 821 | bool slab_in_kunit_test(void) |
| 822 | { |
| 823 | struct kunit_resource *resource; |
| 824 | |
| 825 | if (!kunit_get_current_test()) |
| 826 | return false; |
| 827 | |
| 828 | resource = kunit_find_named_resource(current->kunit_test, "slab_errors"); |
| 829 | if (!resource) |
| 830 | return false; |
| 831 | |
| 832 | kunit_put_resource(resource); |
| 833 | return true; |
| 834 | } |
| 835 | #else |
| 836 | static inline bool slab_add_kunit_errors(void) { return false; } |
| 837 | #endif |
| 838 | |
| 839 | static inline unsigned int size_from_object(struct kmem_cache *s) |
| 840 | { |
| 841 | if (s->flags & SLAB_RED_ZONE) |
| 842 | return s->size - s->red_left_pad; |
| 843 | |
| 844 | return s->size; |
| 845 | } |
| 846 | |
| 847 | static inline void *restore_red_left(struct kmem_cache *s, void *p) |
| 848 | { |
| 849 | if (s->flags & SLAB_RED_ZONE) |
| 850 | p -= s->red_left_pad; |
| 851 | |
| 852 | return p; |
| 853 | } |
| 854 | |
| 855 | /* |
| 856 | * Debug settings: |
| 857 | */ |
| 858 | #if defined(CONFIG_SLUB_DEBUG_ON) |
| 859 | static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS; |
| 860 | #else |
| 861 | static slab_flags_t slub_debug; |
| 862 | #endif |
| 863 | |
| 864 | static char *slub_debug_string; |
| 865 | static int disable_higher_order_debug; |
| 866 | |
| 867 | /* |
| 868 | * slub is about to manipulate internal object metadata. This memory lies |
| 869 | * outside the range of the allocated object, so accessing it would normally |
| 870 | * be reported by kasan as a bounds error. metadata_access_enable() is used |
| 871 | * to tell kasan that these accesses are OK. |
| 872 | */ |
| 873 | static inline void metadata_access_enable(void) |
| 874 | { |
| 875 | kasan_disable_current(); |
| 876 | kmsan_disable_current(); |
| 877 | } |
| 878 | |
| 879 | static inline void metadata_access_disable(void) |
| 880 | { |
| 881 | kmsan_enable_current(); |
| 882 | kasan_enable_current(); |
| 883 | } |
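|  | |
|  | /* |
|  |  * These are used in tight pairs around raw metadata accesses, e.g. (sketch |
|  |  * mirroring check_bytes_and_report()): |
|  |  * |
|  |  *	metadata_access_enable(); |
|  |  *	fault = memchr_inv(kasan_reset_tag(start), value, bytes); |
|  |  *	metadata_access_disable(); |
|  |  */ |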
| 884 | |
| 885 | /* |
| 886 | * Object debugging |
| 887 | */ |
| 888 | |
| 889 | /* Verify that a pointer has an address that is valid within a slab page */ |
| 890 | static inline int check_valid_pointer(struct kmem_cache *s, |
| 891 | struct slab *slab, void *object) |
| 892 | { |
| 893 | void *base; |
| 894 | |
| 895 | if (!object) |
| 896 | return 1; |
| 897 | |
| 898 | base = slab_address(slab); |
| 899 | object = kasan_reset_tag(object); |
| 900 | object = restore_red_left(s, object); |
| 901 | if (object < base || object >= base + slab->objects * s->size || |
| 902 | (object - base) % s->size) { |
| 903 | return 0; |
| 904 | } |
| 905 | |
| 906 | return 1; |
| 907 | } |
| 908 | |
| 909 | static void print_section(char *level, char *text, u8 *addr, |
| 910 | unsigned int length) |
| 911 | { |
| 912 | metadata_access_enable(); |
| 913 | print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, |
| 914 | 16, 1, kasan_reset_tag((void *)addr), length, 1); |
| 915 | metadata_access_disable(); |
| 916 | } |
| 917 | |
| 918 | static struct track *get_track(struct kmem_cache *s, void *object, |
| 919 | enum track_item alloc) |
| 920 | { |
| 921 | struct track *p; |
| 922 | |
| 923 | p = object + get_info_end(s); |
| 924 | |
| 925 | return kasan_reset_tag(p + alloc); |
| 926 | } |
| 927 | |
| 928 | #ifdef CONFIG_STACKDEPOT |
| 929 | static noinline depot_stack_handle_t set_track_prepare(void) |
| 930 | { |
| 931 | depot_stack_handle_t handle; |
| 932 | unsigned long entries[TRACK_ADDRS_COUNT]; |
| 933 | unsigned int nr_entries; |
| 934 | |
| 935 | nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3); |
| 936 | handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT); |
| 937 | |
| 938 | return handle; |
| 939 | } |
| 940 | #else |
| 941 | static inline depot_stack_handle_t set_track_prepare(void) |
| 942 | { |
| 943 | return 0; |
| 944 | } |
| 945 | #endif |
| 946 | |
| 947 | static void set_track_update(struct kmem_cache *s, void *object, |
| 948 | enum track_item alloc, unsigned long addr, |
| 949 | depot_stack_handle_t handle) |
| 950 | { |
| 951 | struct track *p = get_track(s, object, alloc); |
| 952 | |
| 953 | #ifdef CONFIG_STACKDEPOT |
| 954 | p->handle = handle; |
| 955 | #endif |
| 956 | p->addr = addr; |
| 957 | p->cpu = smp_processor_id(); |
| 958 | p->pid = current->pid; |
| 959 | p->when = jiffies; |
| 960 | } |
| 961 | |
| 962 | static __always_inline void set_track(struct kmem_cache *s, void *object, |
| 963 | enum track_item alloc, unsigned long addr) |
| 964 | { |
| 965 | depot_stack_handle_t handle = set_track_prepare(); |
| 966 | |
| 967 | set_track_update(s, object, alloc, addr, handle); |
| 968 | } |
| 969 | |
| 970 | static void init_tracking(struct kmem_cache *s, void *object) |
| 971 | { |
| 972 | struct track *p; |
| 973 | |
| 974 | if (!(s->flags & SLAB_STORE_USER)) |
| 975 | return; |
| 976 | |
| 977 | p = get_track(s, object, TRACK_ALLOC); |
| 978 | memset(p, 0, 2*sizeof(struct track)); |
| 979 | } |
| 980 | |
| 981 | static void print_track(const char *s, struct track *t, unsigned long pr_time) |
| 982 | { |
| 983 | depot_stack_handle_t handle __maybe_unused; |
| 984 | |
| 985 | if (!t->addr) |
| 986 | return; |
| 987 | |
| 988 | pr_err("%s in %pS age=%lu cpu=%u pid=%d\n", |
| 989 | s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); |
| 990 | #ifdef CONFIG_STACKDEPOT |
| 991 | handle = READ_ONCE(t->handle); |
| 992 | if (handle) |
| 993 | stack_depot_print(handle); |
| 994 | else |
| 995 | pr_err("object allocation/free stack trace missing\n"); |
| 996 | #endif |
| 997 | } |
| 998 | |
| 999 | void print_tracking(struct kmem_cache *s, void *object) |
| 1000 | { |
| 1001 | unsigned long pr_time = jiffies; |
| 1002 | if (!(s->flags & SLAB_STORE_USER)) |
| 1003 | return; |
| 1004 | |
| 1005 | print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time); |
| 1006 | print_track("Freed", get_track(s, object, TRACK_FREE), pr_time); |
| 1007 | } |
| 1008 | |
| 1009 | static void print_slab_info(const struct slab *slab) |
| 1010 | { |
| 1011 | pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n", |
| 1012 | slab, slab->objects, slab->inuse, slab->freelist, |
| 1013 | &slab->__page_flags); |
| 1014 | } |
| 1015 | |
| 1016 | void skip_orig_size_check(struct kmem_cache *s, const void *object) |
| 1017 | { |
| 1018 | set_orig_size(s, (void *)object, s->object_size); |
| 1019 | } |
| 1020 | |
| 1021 | static void __slab_bug(struct kmem_cache *s, const char *fmt, va_list argsp) |
| 1022 | { |
| 1023 | struct va_format vaf; |
| 1024 | va_list args; |
| 1025 | |
| 1026 | va_copy(args, argsp); |
| 1027 | vaf.fmt = fmt; |
| 1028 | vaf.va = &args; |
| 1029 | pr_err("=============================================================================\n"); |
| 1030 | pr_err("BUG %s (%s): %pV\n", s ? s->name : "<unknown>", print_tainted(), &vaf); |
| 1031 | pr_err("-----------------------------------------------------------------------------\n\n"); |
| 1032 | va_end(args); |
| 1033 | } |
| 1034 | |
| 1035 | static void slab_bug(struct kmem_cache *s, const char *fmt, ...) |
| 1036 | { |
| 1037 | va_list args; |
| 1038 | |
| 1039 | va_start(args, fmt); |
| 1040 | __slab_bug(s, fmt, args); |
| 1041 | va_end(args); |
| 1042 | } |
| 1043 | |
| 1044 | __printf(2, 3) |
| 1045 | static void slab_fix(struct kmem_cache *s, const char *fmt, ...) |
| 1046 | { |
| 1047 | struct va_format vaf; |
| 1048 | va_list args; |
| 1049 | |
| 1050 | if (slab_add_kunit_errors()) |
| 1051 | return; |
| 1052 | |
| 1053 | va_start(args, fmt); |
| 1054 | vaf.fmt = fmt; |
| 1055 | vaf.va = &args; |
| 1056 | pr_err("FIX %s: %pV\n", s->name, &vaf); |
| 1057 | va_end(args); |
| 1058 | } |
| 1059 | |
| 1060 | static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p) |
| 1061 | { |
| 1062 | unsigned int off; /* Offset of last byte */ |
| 1063 | u8 *addr = slab_address(slab); |
| 1064 | |
| 1065 | print_tracking(s, p); |
| 1066 | |
| 1067 | print_slab_info(slab); |
| 1068 | |
| 1069 | pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n", |
| 1070 | p, p - addr, get_freepointer(s, p)); |
| 1071 | |
| 1072 | if (s->flags & SLAB_RED_ZONE) |
| 1073 | print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, |
| 1074 | s->red_left_pad); |
| 1075 | else if (p > addr + 16) |
| 1076 | print_section(KERN_ERR, "Bytes b4 ", p - 16, 16); |
| 1077 | |
| 1078 | print_section(KERN_ERR, "Object ", p, |
| 1079 | min_t(unsigned int, s->object_size, PAGE_SIZE)); |
| 1080 | if (s->flags & SLAB_RED_ZONE) |
| 1081 | print_section(KERN_ERR, "Redzone ", p + s->object_size, |
| 1082 | s->inuse - s->object_size); |
| 1083 | |
| 1084 | off = get_info_end(s); |
| 1085 | |
| 1086 | if (s->flags & SLAB_STORE_USER) |
| 1087 | off += 2 * sizeof(struct track); |
| 1088 | |
| 1089 | if (slub_debug_orig_size(s)) |
| 1090 | off += sizeof(unsigned int); |
| 1091 | |
| 1092 | off += kasan_metadata_size(s, false); |
| 1093 | |
| 1094 | if (off != size_from_object(s)) |
| 1095 | /* Beginning of the filler is the free pointer */ |
| 1096 | print_section(KERN_ERR, "Padding ", p + off, |
| 1097 | size_from_object(s) - off); |
| 1098 | } |
| 1099 | |
| 1100 | static void object_err(struct kmem_cache *s, struct slab *slab, |
| 1101 | u8 *object, const char *reason) |
| 1102 | { |
| 1103 | if (slab_add_kunit_errors()) |
| 1104 | return; |
| 1105 | |
| 1106 | slab_bug(s, reason); |
| 1107 | print_trailer(s, slab, object); |
| 1108 | add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); |
| 1109 | |
| 1110 | WARN_ON(1); |
| 1111 | } |
| 1112 | |
| 1113 | static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, |
| 1114 | void **freelist, void *nextfree) |
| 1115 | { |
| 1116 | if ((s->flags & SLAB_CONSISTENCY_CHECKS) && |
| 1117 | !check_valid_pointer(s, slab, nextfree) && freelist) { |
| 1118 | object_err(s, slab, *freelist, "Freechain corrupt"); |
| 1119 | *freelist = NULL; |
| 1120 | slab_fix(s, "Isolate corrupted freechain"); |
| 1121 | return true; |
| 1122 | } |
| 1123 | |
| 1124 | return false; |
| 1125 | } |
| 1126 | |
| 1127 | static void __slab_err(struct slab *slab) |
| 1128 | { |
| 1129 | if (slab_in_kunit_test()) |
| 1130 | return; |
| 1131 | |
| 1132 | print_slab_info(slab); |
| 1133 | add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); |
| 1134 | |
| 1135 | WARN_ON(1); |
| 1136 | } |
| 1137 | |
| 1138 | static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab, |
| 1139 | const char *fmt, ...) |
| 1140 | { |
| 1141 | va_list args; |
| 1142 | |
| 1143 | if (slab_add_kunit_errors()) |
| 1144 | return; |
| 1145 | |
| 1146 | va_start(args, fmt); |
| 1147 | __slab_bug(s, fmt, args); |
| 1148 | va_end(args); |
| 1149 | |
| 1150 | __slab_err(slab); |
| 1151 | } |
| 1152 | |
| 1153 | static void init_object(struct kmem_cache *s, void *object, u8 val) |
| 1154 | { |
| 1155 | u8 *p = kasan_reset_tag(object); |
| 1156 | unsigned int poison_size = s->object_size; |
| 1157 | |
| 1158 | if (s->flags & SLAB_RED_ZONE) { |
| 1159 | /* |
| 1160 | * Here and below, avoid overwriting the KMSAN shadow. Keeping |
| 1161 | * the shadow makes it possible to distinguish uninit-value |
| 1162 | * from use-after-free. |
| 1163 | */ |
| 1164 | memset_no_sanitize_memory(p - s->red_left_pad, val, |
| 1165 | s->red_left_pad); |
| 1166 | |
| 1167 | if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { |
| 1168 |  * Redzone the space that kmalloc allocated beyond the |
| 1169 |  * requested size, and limit the poison size to the |
| 1170 |  * original request size accordingly. |
| 1171 | * the original request size accordingly. |
| 1172 | */ |
| 1173 | poison_size = get_orig_size(s, object); |
| 1174 | } |
| 1175 | } |
| 1176 | |
| 1177 | if (s->flags & __OBJECT_POISON) { |
| 1178 | memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1); |
| 1179 | memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1); |
| 1180 | } |
| 1181 | |
| 1182 | if (s->flags & SLAB_RED_ZONE) |
| 1183 | memset_no_sanitize_memory(p + poison_size, val, |
| 1184 | s->inuse - poison_size); |
| 1185 | } |
| 1186 | |
| 1187 | static void restore_bytes(struct kmem_cache *s, const char *message, u8 data, |
| 1188 | void *from, void *to) |
| 1189 | { |
| 1190 | slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data); |
| 1191 | memset(from, data, to - from); |
| 1192 | } |
| 1193 | |
| 1194 | #ifdef CONFIG_KMSAN |
| 1195 | #define pad_check_attributes noinline __no_kmsan_checks |
| 1196 | #else |
| 1197 | #define pad_check_attributes |
| 1198 | #endif |
| 1199 | |
| 1200 | static pad_check_attributes int |
| 1201 | check_bytes_and_report(struct kmem_cache *s, struct slab *slab, |
| 1202 | u8 *object, const char *what, u8 *start, unsigned int value, |
| 1203 | unsigned int bytes, bool slab_obj_print) |
| 1204 | { |
| 1205 | u8 *fault; |
| 1206 | u8 *end; |
| 1207 | u8 *addr = slab_address(slab); |
| 1208 | |
| 1209 | metadata_access_enable(); |
| 1210 | fault = memchr_inv(kasan_reset_tag(start), value, bytes); |
| 1211 | metadata_access_disable(); |
| 1212 | if (!fault) |
| 1213 | return 1; |
| 1214 | |
| 1215 | end = start + bytes; |
| 1216 | while (end > fault && end[-1] == value) |
| 1217 | end--; |
| 1218 | |
| 1219 | if (slab_add_kunit_errors()) |
| 1220 | goto skip_bug_print; |
| 1221 | |
| 1222 | pr_err("[%s overwritten] 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n", |
| 1223 | what, fault, end - 1, fault - addr, fault[0], value); |
| 1224 | |
| 1225 | if (slab_obj_print) |
| 1226 | object_err(s, slab, object, "Object corrupt"); |
| 1227 | |
| 1228 | skip_bug_print: |
| 1229 | restore_bytes(s, what, value, fault, end); |
| 1230 | return 0; |
| 1231 | } |
| 1232 | |
| 1233 | /* |
| 1234 | * Object layout: |
| 1235 | * |
| 1236 | * object address |
| 1237 | * Bytes of the object to be managed. |
| 1238 | * If the freepointer may overlay the object then the free |
| 1239 |  * pointer is in the middle of the object. |
| 1240 | * |
| 1241 | * Poisoning uses 0x6b (POISON_FREE) and the last byte is |
| 1242 | * 0xa5 (POISON_END) |
| 1243 | * |
| 1244 | * object + s->object_size |
| 1245 | * Padding to reach word boundary. This is also used for Redzoning. |
| 1246 | * Padding is extended by another word if Redzoning is enabled and |
| 1247 | * object_size == inuse. |
| 1248 | * |
| 1249 | * We fill with 0xbb (SLUB_RED_INACTIVE) for inactive objects and with |
| 1250 | * 0xcc (SLUB_RED_ACTIVE) for objects in use. |
| 1251 | * |
| 1252 | * object + s->inuse |
| 1253 | * Meta data starts here. |
| 1254 | * |
| 1255 | * A. Free pointer (if we cannot overwrite object on free) |
| 1256 | * B. Tracking data for SLAB_STORE_USER |
| 1257 | * C. Original request size for kmalloc object (SLAB_STORE_USER enabled) |
| 1258 | * D. Padding to reach required alignment boundary or at minimum |
| 1259 | * one word if debugging is on to be able to detect writes |
| 1260 | * before the word boundary. |
| 1261 | * |
| 1262 | * Padding is done using 0x5a (POISON_INUSE) |
| 1263 | * |
| 1264 | * object + s->size |
| 1265 | * Nothing is used beyond s->size. |
| 1266 | * |
| 1267 | * If slabcaches are merged then the object_size and inuse boundaries are mostly |
| 1268 |  * ignored, and therefore no slab options that rely on these boundaries |
| 1269 | * may be used with merged slabcaches. |
| 1270 | */ |
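|  | |
|  | /* |
|  |  * Illustrative layout sketch (exact offsets depend on flags and alignment) |
|  |  * for a debug kmalloc cache with SLAB_RED_ZONE and SLAB_STORE_USER where |
|  |  * the free pointer lies outside the object: |
|  |  * |
|  |  *	[ left redzone: red_left_pad bytes ] |
|  |  *	[ object: object_size bytes        ]  <- the object address points here |
|  |  *	[ right redzone up to s->inuse     ] |
|  |  *	[ free pointer                     ] |
|  |  *	[ two struct track entries         ] |
|  |  *	[ orig_size (kmalloc caches only)  ] |
|  |  *	[ padding up to s->size            ] |
|  |  */ |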
| 1271 | |
| 1272 | static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p) |
| 1273 | { |
| 1274 | unsigned long off = get_info_end(s); /* The end of info */ |
| 1275 | |
| 1276 | if (s->flags & SLAB_STORE_USER) { |
| 1277 | /* We also have user information there */ |
| 1278 | off += 2 * sizeof(struct track); |
| 1279 | |
| 1280 | if (s->flags & SLAB_KMALLOC) |
| 1281 | off += sizeof(unsigned int); |
| 1282 | } |
| 1283 | |
| 1284 | off += kasan_metadata_size(s, false); |
| 1285 | |
| 1286 | if (size_from_object(s) == off) |
| 1287 | return 1; |
| 1288 | |
| 1289 | return check_bytes_and_report(s, slab, p, "Object padding", |
| 1290 | p + off, POISON_INUSE, size_from_object(s) - off, true); |
| 1291 | } |
| 1292 | |
| 1293 | /* Check the pad bytes at the end of a slab page */ |
| 1294 | static pad_check_attributes void |
| 1295 | slab_pad_check(struct kmem_cache *s, struct slab *slab) |
| 1296 | { |
| 1297 | u8 *start; |
| 1298 | u8 *fault; |
| 1299 | u8 *end; |
| 1300 | u8 *pad; |
| 1301 | int length; |
| 1302 | int remainder; |
| 1303 | |
| 1304 | if (!(s->flags & SLAB_POISON)) |
| 1305 | return; |
| 1306 | |
| 1307 | start = slab_address(slab); |
| 1308 | length = slab_size(slab); |
| 1309 | end = start + length; |
| 1310 | remainder = length % s->size; |
| 1311 | if (!remainder) |
| 1312 | return; |
| 1313 | |
| 1314 | pad = end - remainder; |
| 1315 | metadata_access_enable(); |
| 1316 | fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder); |
| 1317 | metadata_access_disable(); |
| 1318 | if (!fault) |
| 1319 | return; |
| 1320 | while (end > fault && end[-1] == POISON_INUSE) |
| 1321 | end--; |
| 1322 | |
| 1323 | slab_bug(s, "Padding overwritten. 0x%p-0x%p @offset=%tu", |
| 1324 | fault, end - 1, fault - start); |
| 1325 | print_section(KERN_ERR, "Padding ", pad, remainder); |
| 1326 | __slab_err(slab); |
| 1327 | |
| 1328 | restore_bytes(s, "slab padding", POISON_INUSE, fault, end); |
| 1329 | } |
| 1330 | |
| 1331 | static int check_object(struct kmem_cache *s, struct slab *slab, |
| 1332 | void *object, u8 val) |
| 1333 | { |
| 1334 | u8 *p = object; |
| 1335 | u8 *endobject = object + s->object_size; |
| 1336 | unsigned int orig_size, kasan_meta_size; |
| 1337 | int ret = 1; |
| 1338 | |
| 1339 | if (s->flags & SLAB_RED_ZONE) { |
| 1340 | if (!check_bytes_and_report(s, slab, object, "Left Redzone", |
| 1341 | object - s->red_left_pad, val, s->red_left_pad, ret)) |
| 1342 | ret = 0; |
| 1343 | |
| 1344 | if (!check_bytes_and_report(s, slab, object, "Right Redzone", |
| 1345 | endobject, val, s->inuse - s->object_size, ret)) |
| 1346 | ret = 0; |
| 1347 | |
| 1348 | if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { |
| 1349 | orig_size = get_orig_size(s, object); |
| 1350 | |
| 1351 | if (s->object_size > orig_size && |
| 1352 | !check_bytes_and_report(s, slab, object, |
| 1353 | "kmalloc Redzone", p + orig_size, |
| 1354 | val, s->object_size - orig_size, ret)) { |
| 1355 | ret = 0; |
| 1356 | } |
| 1357 | } |
| 1358 | } else { |
| 1359 | if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { |
| 1360 | if (!check_bytes_and_report(s, slab, p, "Alignment padding", |
| 1361 | endobject, POISON_INUSE, |
| 1362 | s->inuse - s->object_size, ret)) |
| 1363 | ret = 0; |
| 1364 | } |
| 1365 | } |
| 1366 | |
| 1367 | if (s->flags & SLAB_POISON) { |
| 1368 | if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) { |
| 1369 | /* |
| 1370 | * KASAN can save its free meta data inside of the |
| 1371 | * object at offset 0. Thus, skip checking the part of |
| 1372 | * the redzone that overlaps with the meta data. |
| 1373 | */ |
| 1374 | kasan_meta_size = kasan_metadata_size(s, true); |
| 1375 | if (kasan_meta_size < s->object_size - 1 && |
| 1376 | !check_bytes_and_report(s, slab, p, "Poison", |
| 1377 | p + kasan_meta_size, POISON_FREE, |
| 1378 | s->object_size - kasan_meta_size - 1, ret)) |
| 1379 | ret = 0; |
| 1380 | if (kasan_meta_size < s->object_size && |
| 1381 | !check_bytes_and_report(s, slab, p, "End Poison", |
| 1382 | p + s->object_size - 1, POISON_END, 1, ret)) |
| 1383 | ret = 0; |
| 1384 | } |
| 1385 | /* |
| 1386 | * check_pad_bytes cleans up on its own. |
| 1387 | */ |
| 1388 | if (!check_pad_bytes(s, slab, p)) |
| 1389 | ret = 0; |
| 1390 | } |
| 1391 | |
| 1392 | /* |
| 1393 | * Cannot check freepointer while object is allocated if |
| 1394 | * object and freepointer overlap. |
| 1395 | */ |
| 1396 | if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) && |
| 1397 | !check_valid_pointer(s, slab, get_freepointer(s, p))) { |
| 1398 | object_err(s, slab, p, "Freepointer corrupt"); |
| 1399 | /* |
| 1400 | * No choice but to zap it and thus lose the remainder |
| 1401 | * of the free objects in this slab. May cause |
| 1402 | * another error because the object count is now wrong. |
| 1403 | */ |
| 1404 | set_freepointer(s, p, NULL); |
| 1405 | ret = 0; |
| 1406 | } |
| 1407 | |
| 1408 | return ret; |
| 1409 | } |
| 1410 | |
| 1411 | static int check_slab(struct kmem_cache *s, struct slab *slab) |
| 1412 | { |
| 1413 | int maxobj; |
| 1414 | |
| 1415 | if (!folio_test_slab(slab_folio(slab))) { |
| 1416 | slab_err(s, slab, "Not a valid slab page"); |
| 1417 | return 0; |
| 1418 | } |
| 1419 | |
| 1420 | maxobj = order_objects(slab_order(slab), s->size); |
| 1421 | if (slab->objects > maxobj) { |
| 1422 | slab_err(s, slab, "objects %u > max %u", |
| 1423 | slab->objects, maxobj); |
| 1424 | return 0; |
| 1425 | } |
| 1426 | if (slab->inuse > slab->objects) { |
| 1427 | slab_err(s, slab, "inuse %u > max %u", |
| 1428 | slab->inuse, slab->objects); |
| 1429 | return 0; |
| 1430 | } |
| 1431 | if (slab->frozen) { |
| 1432 | slab_err(s, slab, "Slab disabled since SLUB metadata consistency check failed"); |
| 1433 | return 0; |
| 1434 | } |
| 1435 | |
| 1436 | /* slab_pad_check() fixes things up after itself */ |
| 1437 | slab_pad_check(s, slab); |
| 1438 | return 1; |
| 1439 | } |
| 1440 | |
| 1441 | /* |
| 1442 | * Determine if a certain object in a slab is on the freelist. Must hold the |
| 1443 | * slab lock to guarantee that the chains are in a consistent state. |
| 1444 | */ |
| 1445 | static bool on_freelist(struct kmem_cache *s, struct slab *slab, void *search) |
| 1446 | { |
| 1447 | int nr = 0; |
| 1448 | void *fp; |
| 1449 | void *object = NULL; |
| 1450 | int max_objects; |
| 1451 | |
| 1452 | fp = slab->freelist; |
| 1453 | while (fp && nr <= slab->objects) { |
| 1454 | if (fp == search) |
| 1455 | return true; |
| 1456 | if (!check_valid_pointer(s, slab, fp)) { |
| 1457 | if (object) { |
| 1458 | object_err(s, slab, object, |
| 1459 | "Freechain corrupt"); |
| 1460 | set_freepointer(s, object, NULL); |
| 1461 | break; |
| 1462 | } else { |
| 1463 | slab_err(s, slab, "Freepointer corrupt"); |
| 1464 | slab->freelist = NULL; |
| 1465 | slab->inuse = slab->objects; |
| 1466 | slab_fix(s, "Freelist cleared"); |
| 1467 | return false; |
| 1468 | } |
| 1469 | } |
| 1470 | object = fp; |
| 1471 | fp = get_freepointer(s, object); |
| 1472 | nr++; |
| 1473 | } |
| 1474 | |
| 1475 | if (nr > slab->objects) { |
| 1476 | slab_err(s, slab, "Freelist cycle detected"); |
| 1477 | slab->freelist = NULL; |
| 1478 | slab->inuse = slab->objects; |
| 1479 | slab_fix(s, "Freelist cleared"); |
| 1480 | return false; |
| 1481 | } |
| 1482 | |
| 1483 | max_objects = order_objects(slab_order(slab), s->size); |
| 1484 | if (max_objects > MAX_OBJS_PER_PAGE) |
| 1485 | max_objects = MAX_OBJS_PER_PAGE; |
| 1486 | |
| 1487 | if (slab->objects != max_objects) { |
| 1488 | slab_err(s, slab, "Wrong number of objects. Found %d but should be %d", |
| 1489 | slab->objects, max_objects); |
| 1490 | slab->objects = max_objects; |
| 1491 | slab_fix(s, "Number of objects adjusted"); |
| 1492 | } |
| 1493 | if (slab->inuse != slab->objects - nr) { |
| 1494 | slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d", |
| 1495 | slab->inuse, slab->objects - nr); |
| 1496 | slab->inuse = slab->objects - nr; |
| 1497 | slab_fix(s, "Object count adjusted"); |
| 1498 | } |
| 1499 | return search == NULL; |
| 1500 | } |
| 1501 | |
| 1502 | static void trace(struct kmem_cache *s, struct slab *slab, void *object, |
| 1503 | int alloc) |
| 1504 | { |
| 1505 | if (s->flags & SLAB_TRACE) { |
| 1506 | pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n", |
| 1507 | s->name, |
| 1508 | alloc ? "alloc" : "free", |
| 1509 | object, slab->inuse, |
| 1510 | slab->freelist); |
| 1511 | |
| 1512 | if (!alloc) |
| 1513 | print_section(KERN_INFO, "Object ", (void *)object, |
| 1514 | s->object_size); |
| 1515 | |
| 1516 | dump_stack(); |
| 1517 | } |
| 1518 | } |
| 1519 | |
| 1520 | /* |
| 1521 | * Tracking of fully allocated slabs for debugging purposes. |
| 1522 | */ |
| 1523 | static void add_full(struct kmem_cache *s, |
| 1524 | struct kmem_cache_node *n, struct slab *slab) |
| 1525 | { |
| 1526 | if (!(s->flags & SLAB_STORE_USER)) |
| 1527 | return; |
| 1528 | |
| 1529 | lockdep_assert_held(&n->list_lock); |
| 1530 | list_add(&slab->slab_list, &n->full); |
| 1531 | } |
| 1532 | |
| 1533 | static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab) |
| 1534 | { |
| 1535 | if (!(s->flags & SLAB_STORE_USER)) |
| 1536 | return; |
| 1537 | |
| 1538 | lockdep_assert_held(&n->list_lock); |
| 1539 | list_del(&slab->slab_list); |
| 1540 | } |
| 1541 | |
| 1542 | static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) |
| 1543 | { |
| 1544 | return atomic_long_read(&n->nr_slabs); |
| 1545 | } |
| 1546 | |
| 1547 | static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) |
| 1548 | { |
| 1549 | struct kmem_cache_node *n = get_node(s, node); |
| 1550 | |
| 1551 | atomic_long_inc(&n->nr_slabs); |
| 1552 | atomic_long_add(objects, &n->total_objects); |
| 1553 | } |
| 1554 | static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) |
| 1555 | { |
| 1556 | struct kmem_cache_node *n = get_node(s, node); |
| 1557 | |
| 1558 | atomic_long_dec(&n->nr_slabs); |
| 1559 | atomic_long_sub(objects, &n->total_objects); |
| 1560 | } |
| 1561 | |
| 1562 | /* Object debug checks for alloc/free paths */ |
| 1563 | static void setup_object_debug(struct kmem_cache *s, void *object) |
| 1564 | { |
| 1565 | if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)) |
| 1566 | return; |
| 1567 | |
| 1568 | init_object(s, object, SLUB_RED_INACTIVE); |
| 1569 | init_tracking(s, object); |
| 1570 | } |
| 1571 | |
| 1572 | static |
| 1573 | void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) |
| 1574 | { |
| 1575 | if (!kmem_cache_debug_flags(s, SLAB_POISON)) |
| 1576 | return; |
| 1577 | |
| 1578 | metadata_access_enable(); |
| 1579 | memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab)); |
| 1580 | metadata_access_disable(); |
| 1581 | } |
| 1582 | |
| 1583 | static inline int alloc_consistency_checks(struct kmem_cache *s, |
| 1584 | struct slab *slab, void *object) |
| 1585 | { |
| 1586 | if (!check_slab(s, slab)) |
| 1587 | return 0; |
| 1588 | |
| 1589 | if (!check_valid_pointer(s, slab, object)) { |
| 1590 | object_err(s, slab, object, "Freelist Pointer check fails"); |
| 1591 | return 0; |
| 1592 | } |
| 1593 | |
| 1594 | if (!check_object(s, slab, object, SLUB_RED_INACTIVE)) |
| 1595 | return 0; |
| 1596 | |
| 1597 | return 1; |
| 1598 | } |
| 1599 | |
| 1600 | static noinline bool alloc_debug_processing(struct kmem_cache *s, |
| 1601 | struct slab *slab, void *object, int orig_size) |
| 1602 | { |
| 1603 | if (s->flags & SLAB_CONSISTENCY_CHECKS) { |
| 1604 | if (!alloc_consistency_checks(s, slab, object)) |
| 1605 | goto bad; |
| 1606 | } |
| 1607 | |
| 1608 | /* Success. Perform special debug activities for allocs */ |
| 1609 | trace(s, slab, object, 1); |
| 1610 | set_orig_size(s, object, orig_size); |
| 1611 | init_object(s, object, SLUB_RED_ACTIVE); |
| 1612 | return true; |
| 1613 | |
| 1614 | bad: |
| 1615 | if (folio_test_slab(slab_folio(slab))) { |
| 1616 | /* |
		 * If this is a slab page then let's do the best we can
| 1618 | * to avoid issues in the future. Marking all objects |
| 1619 | * as used avoids touching the remaining objects. |
| 1620 | */ |
| 1621 | slab_fix(s, "Marking all objects used"); |
| 1622 | slab->inuse = slab->objects; |
| 1623 | slab->freelist = NULL; |
| 1624 | slab->frozen = 1; /* mark consistency-failed slab as frozen */ |
| 1625 | } |
| 1626 | return false; |
| 1627 | } |
| 1628 | |
| 1629 | static inline int free_consistency_checks(struct kmem_cache *s, |
| 1630 | struct slab *slab, void *object, unsigned long addr) |
| 1631 | { |
| 1632 | if (!check_valid_pointer(s, slab, object)) { |
| 1633 | slab_err(s, slab, "Invalid object pointer 0x%p", object); |
| 1634 | return 0; |
| 1635 | } |
| 1636 | |
| 1637 | if (on_freelist(s, slab, object)) { |
| 1638 | object_err(s, slab, object, "Object already free"); |
| 1639 | return 0; |
| 1640 | } |
| 1641 | |
| 1642 | if (!check_object(s, slab, object, SLUB_RED_ACTIVE)) |
| 1643 | return 0; |
| 1644 | |
| 1645 | if (unlikely(s != slab->slab_cache)) { |
| 1646 | if (!folio_test_slab(slab_folio(slab))) { |
| 1647 | slab_err(s, slab, "Attempt to free object(0x%p) outside of slab", |
| 1648 | object); |
| 1649 | } else if (!slab->slab_cache) { |
| 1650 | slab_err(NULL, slab, "No slab cache for object 0x%p", |
| 1651 | object); |
| 1652 | } else { |
| 1653 | object_err(s, slab, object, |
| 1654 | "page slab pointer corrupt."); |
| 1655 | } |
| 1656 | return 0; |
| 1657 | } |
| 1658 | return 1; |
| 1659 | } |
| 1660 | |
| 1661 | /* |
| 1662 | * Parse a block of slab_debug options. Blocks are delimited by ';' |
| 1663 | * |
| 1664 | * @str: start of block |
| 1665 | * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified |
 * @slabs: returns the start of the list of slabs, or NULL when there is no list
 * @init: true if this is the initial (boot-time) parsing rather than parsing
 *	done when a cache is created
| 1668 | * |
| 1669 | * returns the start of next block if there's any, or NULL |
| 1670 | */ |
| 1671 | static char * |
| 1672 | parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init) |
| 1673 | { |
| 1674 | bool higher_order_disable = false; |
| 1675 | |
| 1676 | /* Skip any completely empty blocks */ |
| 1677 | while (*str && *str == ';') |
| 1678 | str++; |
| 1679 | |
| 1680 | if (*str == ',') { |
| 1681 | /* |
| 1682 | * No options but restriction on slabs. This means full |
| 1683 | * debugging for slabs matching a pattern. |
| 1684 | */ |
| 1685 | *flags = DEBUG_DEFAULT_FLAGS; |
| 1686 | goto check_slabs; |
| 1687 | } |
| 1688 | *flags = 0; |
| 1689 | |
| 1690 | /* Determine which debug features should be switched on */ |
| 1691 | for (; *str && *str != ',' && *str != ';'; str++) { |
| 1692 | switch (tolower(*str)) { |
| 1693 | case '-': |
| 1694 | *flags = 0; |
| 1695 | break; |
| 1696 | case 'f': |
| 1697 | *flags |= SLAB_CONSISTENCY_CHECKS; |
| 1698 | break; |
| 1699 | case 'z': |
| 1700 | *flags |= SLAB_RED_ZONE; |
| 1701 | break; |
| 1702 | case 'p': |
| 1703 | *flags |= SLAB_POISON; |
| 1704 | break; |
| 1705 | case 'u': |
| 1706 | *flags |= SLAB_STORE_USER; |
| 1707 | break; |
| 1708 | case 't': |
| 1709 | *flags |= SLAB_TRACE; |
| 1710 | break; |
| 1711 | case 'a': |
| 1712 | *flags |= SLAB_FAILSLAB; |
| 1713 | break; |
| 1714 | case 'o': |
| 1715 | /* |
			 * Avoid enabling debugging on caches if their minimum
			 * order would increase as a result.
| 1718 | */ |
| 1719 | higher_order_disable = true; |
| 1720 | break; |
| 1721 | default: |
| 1722 | if (init) |
| 1723 | pr_err("slab_debug option '%c' unknown. skipped\n", *str); |
| 1724 | } |
| 1725 | } |
| 1726 | check_slabs: |
| 1727 | if (*str == ',') |
| 1728 | *slabs = ++str; |
| 1729 | else |
| 1730 | *slabs = NULL; |
| 1731 | |
| 1732 | /* Skip over the slab list */ |
| 1733 | while (*str && *str != ';') |
| 1734 | str++; |
| 1735 | |
| 1736 | /* Skip any completely empty blocks */ |
| 1737 | while (*str && *str == ';') |
| 1738 | str++; |
| 1739 | |
| 1740 | if (init && higher_order_disable) |
| 1741 | disable_higher_order_debug = 1; |
| 1742 | |
| 1743 | if (*str) |
| 1744 | return str; |
| 1745 | else |
| 1746 | return NULL; |
| 1747 | } |
| 1748 | |
| 1749 | static int __init setup_slub_debug(char *str) |
| 1750 | { |
| 1751 | slab_flags_t flags; |
| 1752 | slab_flags_t global_flags; |
| 1753 | char *saved_str; |
| 1754 | char *slab_list; |
| 1755 | bool global_slub_debug_changed = false; |
| 1756 | bool slab_list_specified = false; |
| 1757 | |
| 1758 | global_flags = DEBUG_DEFAULT_FLAGS; |
| 1759 | if (*str++ != '=' || !*str) |
| 1760 | /* |
| 1761 | * No options specified. Switch on full debugging. |
| 1762 | */ |
| 1763 | goto out; |
| 1764 | |
| 1765 | saved_str = str; |
| 1766 | while (str) { |
| 1767 | str = parse_slub_debug_flags(str, &flags, &slab_list, true); |
| 1768 | |
| 1769 | if (!slab_list) { |
| 1770 | global_flags = flags; |
| 1771 | global_slub_debug_changed = true; |
| 1772 | } else { |
| 1773 | slab_list_specified = true; |
| 1774 | if (flags & SLAB_STORE_USER) |
| 1775 | stack_depot_request_early_init(); |
| 1776 | } |
| 1777 | } |
| 1778 | |
| 1779 | /* |
	 * For backwards compatibility, a single list of flags with a list of
	 * slabs means debugging is only changed for those slabs, so the global
	 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
	 * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
	 * long as there is no option specifying flags without a slab list.
| 1785 | */ |
| 1786 | if (slab_list_specified) { |
| 1787 | if (!global_slub_debug_changed) |
| 1788 | global_flags = slub_debug; |
| 1789 | slub_debug_string = saved_str; |
| 1790 | } |
| 1791 | out: |
| 1792 | slub_debug = global_flags; |
| 1793 | if (slub_debug & SLAB_STORE_USER) |
| 1794 | stack_depot_request_early_init(); |
| 1795 | if (slub_debug != 0 || slub_debug_string) |
| 1796 | static_branch_enable(&slub_debug_enabled); |
| 1797 | else |
| 1798 | static_branch_disable(&slub_debug_enabled); |
| 1799 | if ((static_branch_unlikely(&init_on_alloc) || |
| 1800 | static_branch_unlikely(&init_on_free)) && |
| 1801 | (slub_debug & SLAB_POISON)) |
| 1802 | pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n"); |
| 1803 | return 1; |
| 1804 | } |
| 1805 | |
| 1806 | __setup("slab_debug", setup_slub_debug); |
| 1807 | __setup_param("slub_debug", slub_debug, setup_slub_debug, 0); |
| 1808 | |
| 1809 | /* |
| 1810 | * kmem_cache_flags - apply debugging options to the cache |
| 1811 | * @flags: flags to set |
| 1812 | * @name: name of the cache |
| 1813 | * |
| 1814 | * Debug option(s) are applied to @flags. In addition to the debug |
| 1815 | * option(s), if a slab name (or multiple) is specified i.e. |
| 1816 | * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ... |
 * then only the selected slabs will receive the debug option(s).
| 1818 | */ |
| 1819 | slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name) |
| 1820 | { |
| 1821 | char *iter; |
| 1822 | size_t len; |
| 1823 | char *next_block; |
| 1824 | slab_flags_t block_flags; |
| 1825 | slab_flags_t slub_debug_local = slub_debug; |
| 1826 | |
| 1827 | if (flags & SLAB_NO_USER_FLAGS) |
| 1828 | return flags; |
| 1829 | |
| 1830 | /* |
| 1831 | * If the slab cache is for debugging (e.g. kmemleak) then |
| 1832 | * don't store user (stack trace) information by default, |
| 1833 | * but let the user enable it via the command line below. |
| 1834 | */ |
| 1835 | if (flags & SLAB_NOLEAKTRACE) |
| 1836 | slub_debug_local &= ~SLAB_STORE_USER; |
| 1837 | |
| 1838 | len = strlen(name); |
| 1839 | next_block = slub_debug_string; |
| 1840 | /* Go through all blocks of debug options, see if any matches our slab's name */ |
| 1841 | while (next_block) { |
| 1842 | next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false); |
| 1843 | if (!iter) |
| 1844 | continue; |
| 1845 | /* Found a block that has a slab list, search it */ |
| 1846 | while (*iter) { |
| 1847 | char *end, *glob; |
| 1848 | size_t cmplen; |
| 1849 | |
| 1850 | end = strchrnul(iter, ','); |
| 1851 | if (next_block && next_block < end) |
| 1852 | end = next_block - 1; |
| 1853 | |
| 1854 | glob = strnchr(iter, end - iter, '*'); |
| 1855 | if (glob) |
| 1856 | cmplen = glob - iter; |
| 1857 | else |
| 1858 | cmplen = max_t(size_t, len, (end - iter)); |
| 1859 | |
| 1860 | if (!strncmp(name, iter, cmplen)) { |
| 1861 | flags |= block_flags; |
| 1862 | return flags; |
| 1863 | } |
| 1864 | |
| 1865 | if (!*end || *end == ';') |
| 1866 | break; |
| 1867 | iter = end + 1; |
| 1868 | } |
| 1869 | } |
| 1870 | |
| 1871 | return flags | slub_debug_local; |
| 1872 | } |
| 1873 | #else /* !CONFIG_SLUB_DEBUG */ |
| 1874 | static inline void setup_object_debug(struct kmem_cache *s, void *object) {} |
| 1875 | static inline |
| 1876 | void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {} |
| 1877 | |
| 1878 | static inline bool alloc_debug_processing(struct kmem_cache *s, |
| 1879 | struct slab *slab, void *object, int orig_size) { return true; } |
| 1880 | |
| 1881 | static inline bool free_debug_processing(struct kmem_cache *s, |
| 1882 | struct slab *slab, void *head, void *tail, int *bulk_cnt, |
| 1883 | unsigned long addr, depot_stack_handle_t handle) { return true; } |
| 1884 | |
| 1885 | static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {} |
| 1886 | static inline int check_object(struct kmem_cache *s, struct slab *slab, |
| 1887 | void *object, u8 val) { return 1; } |
| 1888 | static inline depot_stack_handle_t set_track_prepare(void) { return 0; } |
| 1889 | static inline void set_track(struct kmem_cache *s, void *object, |
| 1890 | enum track_item alloc, unsigned long addr) {} |
| 1891 | static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, |
| 1892 | struct slab *slab) {} |
| 1893 | static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, |
| 1894 | struct slab *slab) {} |
| 1895 | slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name) |
| 1896 | { |
| 1897 | return flags; |
| 1898 | } |
| 1899 | #define slub_debug 0 |
| 1900 | |
| 1901 | #define disable_higher_order_debug 0 |
| 1902 | |
| 1903 | static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) |
| 1904 | { return 0; } |
| 1905 | static inline void inc_slabs_node(struct kmem_cache *s, int node, |
| 1906 | int objects) {} |
| 1907 | static inline void dec_slabs_node(struct kmem_cache *s, int node, |
| 1908 | int objects) {} |
| 1909 | #ifndef CONFIG_SLUB_TINY |
| 1910 | static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, |
| 1911 | void **freelist, void *nextfree) |
| 1912 | { |
| 1913 | return false; |
| 1914 | } |
| 1915 | #endif |
| 1916 | #endif /* CONFIG_SLUB_DEBUG */ |
| 1917 | |
| 1918 | #ifdef CONFIG_SLAB_OBJ_EXT |
| 1919 | |
| 1920 | #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG |
| 1921 | |
| 1922 | static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) |
| 1923 | { |
| 1924 | struct slabobj_ext *slab_exts; |
| 1925 | struct slab *obj_exts_slab; |
| 1926 | |
| 1927 | obj_exts_slab = virt_to_slab(obj_exts); |
| 1928 | slab_exts = slab_obj_exts(obj_exts_slab); |
| 1929 | if (slab_exts) { |
| 1930 | unsigned int offs = obj_to_index(obj_exts_slab->slab_cache, |
| 1931 | obj_exts_slab, obj_exts); |
| 1932 | /* codetag should be NULL */ |
| 1933 | WARN_ON(slab_exts[offs].ref.ct); |
| 1934 | set_codetag_empty(&slab_exts[offs].ref); |
| 1935 | } |
| 1936 | } |
| 1937 | |
| 1938 | static inline void mark_failed_objexts_alloc(struct slab *slab) |
| 1939 | { |
| 1940 | slab->obj_exts = OBJEXTS_ALLOC_FAIL; |
| 1941 | } |
| 1942 | |
| 1943 | static inline void handle_failed_objexts_alloc(unsigned long obj_exts, |
| 1944 | struct slabobj_ext *vec, unsigned int objects) |
| 1945 | { |
| 1946 | /* |
	 * If the vector previously failed to allocate then we have live
| 1948 | * objects with no tag reference. Mark all references in this |
| 1949 | * vector as empty to avoid warnings later on. |
| 1950 | */ |
| 1951 | if (obj_exts & OBJEXTS_ALLOC_FAIL) { |
| 1952 | unsigned int i; |
| 1953 | |
| 1954 | for (i = 0; i < objects; i++) |
| 1955 | set_codetag_empty(&vec[i].ref); |
| 1956 | } |
| 1957 | } |
| 1958 | |
| 1959 | #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ |
| 1960 | |
| 1961 | static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {} |
| 1962 | static inline void mark_failed_objexts_alloc(struct slab *slab) {} |
| 1963 | static inline void handle_failed_objexts_alloc(unsigned long obj_exts, |
| 1964 | struct slabobj_ext *vec, unsigned int objects) {} |
| 1965 | |
| 1966 | #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ |
| 1967 | |
| 1968 | /* |
 * The allocated objcg pointers array is not accounted directly.
 * Moreover, it should not come from a DMA buffer and it is not readily
 * reclaimable. So those GFP bits should be masked off.
| 1972 | */ |
| 1973 | #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \ |
| 1974 | __GFP_ACCOUNT | __GFP_NOFAIL) |
| 1975 | |
| 1976 | static inline void init_slab_obj_exts(struct slab *slab) |
| 1977 | { |
| 1978 | slab->obj_exts = 0; |
| 1979 | } |
| 1980 | |
| 1981 | int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, |
| 1982 | gfp_t gfp, bool new_slab) |
| 1983 | { |
| 1984 | unsigned int objects = objs_per_slab(s, slab); |
| 1985 | unsigned long new_exts; |
| 1986 | unsigned long old_exts; |
| 1987 | struct slabobj_ext *vec; |
| 1988 | |
| 1989 | gfp &= ~OBJCGS_CLEAR_MASK; |
| 1990 | /* Prevent recursive extension vector allocation */ |
| 1991 | gfp |= __GFP_NO_OBJ_EXT; |
| 1992 | vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp, |
| 1993 | slab_nid(slab)); |
| 1994 | if (!vec) { |
| 1995 | /* Mark vectors which failed to allocate */ |
| 1996 | if (new_slab) |
| 1997 | mark_failed_objexts_alloc(slab); |
| 1998 | |
| 1999 | return -ENOMEM; |
| 2000 | } |
| 2001 | |
| 2002 | new_exts = (unsigned long)vec; |
| 2003 | #ifdef CONFIG_MEMCG |
| 2004 | new_exts |= MEMCG_DATA_OBJEXTS; |
| 2005 | #endif |
| 2006 | old_exts = READ_ONCE(slab->obj_exts); |
| 2007 | handle_failed_objexts_alloc(old_exts, vec, objects); |
| 2008 | if (new_slab) { |
| 2009 | /* |
| 2010 | * If the slab is brand new and nobody can yet access its |
| 2011 | * obj_exts, no synchronization is required and obj_exts can |
| 2012 | * be simply assigned. |
| 2013 | */ |
| 2014 | slab->obj_exts = new_exts; |
| 2015 | } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) || |
| 2016 | cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { |
| 2017 | /* |
| 2018 | * If the slab is already in use, somebody can allocate and |
| 2019 | * assign slabobj_exts in parallel. In this case the existing |
| 2020 | * objcg vector should be reused. |
| 2021 | */ |
| 2022 | mark_objexts_empty(vec); |
| 2023 | kfree(vec); |
| 2024 | return 0; |
| 2025 | } |
| 2026 | |
| 2027 | kmemleak_not_leak(vec); |
| 2028 | return 0; |
| 2029 | } |
| 2030 | |
| 2031 | static inline void free_slab_obj_exts(struct slab *slab) |
| 2032 | { |
| 2033 | struct slabobj_ext *obj_exts; |
| 2034 | |
| 2035 | obj_exts = slab_obj_exts(slab); |
| 2036 | if (!obj_exts) |
| 2037 | return; |
| 2038 | |
| 2039 | /* |
	 * obj_exts was created with the __GFP_NO_OBJ_EXT flag, therefore its
	 * corresponding extension will be NULL. alloc_tag_sub() will warn if a
	 * slab has extensions but the extension of an object is NULL, so
	 * replace NULL with CODETAG_EMPTY to indicate that the extension for
	 * obj_exts itself is expected to be NULL.
| 2045 | */ |
| 2046 | mark_objexts_empty(obj_exts); |
| 2047 | kfree(obj_exts); |
| 2048 | slab->obj_exts = 0; |
| 2049 | } |
| 2050 | |
| 2051 | #else /* CONFIG_SLAB_OBJ_EXT */ |
| 2052 | |
| 2053 | static inline void init_slab_obj_exts(struct slab *slab) |
| 2054 | { |
| 2055 | } |
| 2056 | |
| 2057 | static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, |
| 2058 | gfp_t gfp, bool new_slab) |
| 2059 | { |
| 2060 | return 0; |
| 2061 | } |
| 2062 | |
| 2063 | static inline void free_slab_obj_exts(struct slab *slab) |
| 2064 | { |
| 2065 | } |
| 2066 | |
| 2067 | #endif /* CONFIG_SLAB_OBJ_EXT */ |
| 2068 | |
| 2069 | #ifdef CONFIG_MEM_ALLOC_PROFILING |
| 2070 | |
| 2071 | static inline struct slabobj_ext * |
| 2072 | prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p) |
| 2073 | { |
| 2074 | struct slab *slab; |
| 2075 | |
| 2076 | if (!p) |
| 2077 | return NULL; |
| 2078 | |
| 2079 | if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) |
| 2080 | return NULL; |
| 2081 | |
| 2082 | if (flags & __GFP_NO_OBJ_EXT) |
| 2083 | return NULL; |
| 2084 | |
| 2085 | slab = virt_to_slab(p); |
| 2086 | if (!slab_obj_exts(slab) && |
| 2087 | alloc_slab_obj_exts(slab, s, flags, false)) { |
| 2088 | pr_warn_once("%s, %s: Failed to create slab extension vector!\n", |
| 2089 | __func__, s->name); |
| 2090 | return NULL; |
| 2091 | } |
| 2092 | |
| 2093 | return slab_obj_exts(slab) + obj_to_index(s, slab, p); |
| 2094 | } |
| 2095 | |
| 2096 | /* Should be called only if mem_alloc_profiling_enabled() */ |
| 2097 | static noinline void |
| 2098 | __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) |
| 2099 | { |
| 2100 | struct slabobj_ext *obj_exts; |
| 2101 | |
| 2102 | obj_exts = prepare_slab_obj_exts_hook(s, flags, object); |
| 2103 | /* |
| 2104 | * Currently obj_exts is used only for allocation profiling. |
	 * If other users appear then a mem_alloc_profiling_enabled()
| 2106 | * check should be added before alloc_tag_add(). |
| 2107 | */ |
| 2108 | if (likely(obj_exts)) |
| 2109 | alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); |
| 2110 | } |
| 2111 | |
| 2112 | static inline void |
| 2113 | alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) |
| 2114 | { |
| 2115 | if (mem_alloc_profiling_enabled()) |
| 2116 | __alloc_tagging_slab_alloc_hook(s, object, flags); |
| 2117 | } |
| 2118 | |
| 2119 | /* Should be called only if mem_alloc_profiling_enabled() */ |
| 2120 | static noinline void |
| 2121 | __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, |
| 2122 | int objects) |
| 2123 | { |
| 2124 | struct slabobj_ext *obj_exts; |
| 2125 | int i; |
| 2126 | |
| 2127 | /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */ |
| 2128 | if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) |
| 2129 | return; |
| 2130 | |
| 2131 | obj_exts = slab_obj_exts(slab); |
| 2132 | if (!obj_exts) |
| 2133 | return; |
| 2134 | |
| 2135 | for (i = 0; i < objects; i++) { |
| 2136 | unsigned int off = obj_to_index(s, slab, p[i]); |
| 2137 | |
| 2138 | alloc_tag_sub(&obj_exts[off].ref, s->size); |
| 2139 | } |
| 2140 | } |
| 2141 | |
| 2142 | static inline void |
| 2143 | alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, |
| 2144 | int objects) |
| 2145 | { |
| 2146 | if (mem_alloc_profiling_enabled()) |
| 2147 | __alloc_tagging_slab_free_hook(s, slab, p, objects); |
| 2148 | } |
| 2149 | |
| 2150 | #else /* CONFIG_MEM_ALLOC_PROFILING */ |
| 2151 | |
| 2152 | static inline void |
| 2153 | alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) |
| 2154 | { |
| 2155 | } |
| 2156 | |
| 2157 | static inline void |
| 2158 | alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, |
| 2159 | int objects) |
| 2160 | { |
| 2161 | } |
| 2162 | |
| 2163 | #endif /* CONFIG_MEM_ALLOC_PROFILING */ |
| 2164 | |
| 2165 | |
| 2166 | #ifdef CONFIG_MEMCG |
| 2167 | |
| 2168 | static void memcg_alloc_abort_single(struct kmem_cache *s, void *object); |
| 2169 | |
| 2170 | static __fastpath_inline |
| 2171 | bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, |
| 2172 | gfp_t flags, size_t size, void **p) |
| 2173 | { |
| 2174 | if (likely(!memcg_kmem_online())) |
| 2175 | return true; |
| 2176 | |
| 2177 | if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))) |
| 2178 | return true; |
| 2179 | |
| 2180 | if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p))) |
| 2181 | return true; |
| 2182 | |
| 2183 | if (likely(size == 1)) { |
| 2184 | memcg_alloc_abort_single(s, *p); |
| 2185 | *p = NULL; |
| 2186 | } else { |
| 2187 | kmem_cache_free_bulk(s, size, p); |
| 2188 | } |
| 2189 | |
| 2190 | return false; |
| 2191 | } |
| 2192 | |
| 2193 | static __fastpath_inline |
| 2194 | void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, |
| 2195 | int objects) |
| 2196 | { |
| 2197 | struct slabobj_ext *obj_exts; |
| 2198 | |
| 2199 | if (!memcg_kmem_online()) |
| 2200 | return; |
| 2201 | |
| 2202 | obj_exts = slab_obj_exts(slab); |
| 2203 | if (likely(!obj_exts)) |
| 2204 | return; |
| 2205 | |
| 2206 | __memcg_slab_free_hook(s, slab, p, objects, obj_exts); |
| 2207 | } |
| 2208 | |
| 2209 | static __fastpath_inline |
| 2210 | bool memcg_slab_post_charge(void *p, gfp_t flags) |
| 2211 | { |
| 2212 | struct slabobj_ext *slab_exts; |
| 2213 | struct kmem_cache *s; |
| 2214 | struct folio *folio; |
| 2215 | struct slab *slab; |
| 2216 | unsigned long off; |
| 2217 | |
| 2218 | folio = virt_to_folio(p); |
| 2219 | if (!folio_test_slab(folio)) { |
| 2220 | int size; |
| 2221 | |
| 2222 | if (folio_memcg_kmem(folio)) |
| 2223 | return true; |
| 2224 | |
| 2225 | if (__memcg_kmem_charge_page(folio_page(folio, 0), flags, |
| 2226 | folio_order(folio))) |
| 2227 | return false; |
| 2228 | |
| 2229 | /* |
| 2230 | * This folio has already been accounted in the global stats but |
| 2231 | * not in the memcg stats. So, subtract from the global and use |
| 2232 | * the interface which adds to both global and memcg stats. |
| 2233 | */ |
| 2234 | size = folio_size(folio); |
| 2235 | node_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, -size); |
| 2236 | lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, size); |
| 2237 | return true; |
| 2238 | } |
| 2239 | |
| 2240 | slab = folio_slab(folio); |
| 2241 | s = slab->slab_cache; |
| 2242 | |
| 2243 | /* |
	 * Ignore KMALLOC_NORMAL caches to avoid a possible circular dependency
	 * where slab_obj_exts would be allocated from the same slab, making
	 * that slab effectively unfreeable.
| 2247 | */ |
| 2248 | if (is_kmalloc_normal(s)) |
| 2249 | return true; |
| 2250 | |
| 2251 | /* Ignore already charged objects. */ |
| 2252 | slab_exts = slab_obj_exts(slab); |
| 2253 | if (slab_exts) { |
| 2254 | off = obj_to_index(s, slab, p); |
| 2255 | if (unlikely(slab_exts[off].objcg)) |
| 2256 | return true; |
| 2257 | } |
| 2258 | |
| 2259 | return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p); |
| 2260 | } |
| 2261 | |
| 2262 | #else /* CONFIG_MEMCG */ |
| 2263 | static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s, |
| 2264 | struct list_lru *lru, |
| 2265 | gfp_t flags, size_t size, |
| 2266 | void **p) |
| 2267 | { |
| 2268 | return true; |
| 2269 | } |
| 2270 | |
| 2271 | static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, |
| 2272 | void **p, int objects) |
| 2273 | { |
| 2274 | } |
| 2275 | |
| 2276 | static inline bool memcg_slab_post_charge(void *p, gfp_t flags) |
| 2277 | { |
| 2278 | return true; |
| 2279 | } |
| 2280 | #endif /* CONFIG_MEMCG */ |
| 2281 | |
| 2282 | #ifdef CONFIG_SLUB_RCU_DEBUG |
| 2283 | static void slab_free_after_rcu_debug(struct rcu_head *rcu_head); |
| 2284 | |
| 2285 | struct rcu_delayed_free { |
| 2286 | struct rcu_head head; |
| 2287 | void *object; |
| 2288 | }; |
| 2289 | #endif |
| 2290 | |
| 2291 | /* |
 * Hooks for other subsystems that check memory allocations. In a typical
 * production configuration these hooks should all produce no code at all.
 *
 * Returns true if freeing of the object can proceed, false if its reuse
 * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or if it was
 * returned to KFENCE.
| 2298 | */ |
| 2299 | static __always_inline |
| 2300 | bool slab_free_hook(struct kmem_cache *s, void *x, bool init, |
| 2301 | bool after_rcu_delay) |
| 2302 | { |
| 2303 | /* Are the object contents still accessible? */ |
| 2304 | bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay; |
| 2305 | |
| 2306 | kmemleak_free_recursive(x, s->flags); |
| 2307 | kmsan_slab_free(s, x); |
| 2308 | |
| 2309 | debug_check_no_locks_freed(x, s->object_size); |
| 2310 | |
| 2311 | if (!(s->flags & SLAB_DEBUG_OBJECTS)) |
| 2312 | debug_check_no_obj_freed(x, s->object_size); |
| 2313 | |
| 2314 | /* Use KCSAN to help debug racy use-after-free. */ |
| 2315 | if (!still_accessible) |
| 2316 | __kcsan_check_access(x, s->object_size, |
| 2317 | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); |
| 2318 | |
| 2319 | if (kfence_free(x)) |
| 2320 | return false; |
| 2321 | |
| 2322 | /* |
| 2323 | * Give KASAN a chance to notice an invalid free operation before we |
| 2324 | * modify the object. |
| 2325 | */ |
| 2326 | if (kasan_slab_pre_free(s, x)) |
| 2327 | return false; |
| 2328 | |
| 2329 | #ifdef CONFIG_SLUB_RCU_DEBUG |
| 2330 | if (still_accessible) { |
| 2331 | struct rcu_delayed_free *delayed_free; |
| 2332 | |
| 2333 | delayed_free = kmalloc(sizeof(*delayed_free), GFP_NOWAIT); |
| 2334 | if (delayed_free) { |
| 2335 | /* |
| 2336 | * Let KASAN track our call stack as a "related work |
| 2337 | * creation", just like if the object had been freed |
| 2338 | * normally via kfree_rcu(). |
| 2339 | * We have to do this manually because the rcu_head is |
| 2340 | * not located inside the object. |
| 2341 | */ |
| 2342 | kasan_record_aux_stack(x); |
| 2343 | |
| 2344 | delayed_free->object = x; |
| 2345 | call_rcu(&delayed_free->head, slab_free_after_rcu_debug); |
| 2346 | return false; |
| 2347 | } |
| 2348 | } |
| 2349 | #endif /* CONFIG_SLUB_RCU_DEBUG */ |
| 2350 | |
| 2351 | /* |
| 2352 | * As memory initialization might be integrated into KASAN, |
	 * kasan_slab_free and the initialization memsets must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * The initialization memsets clear the object and the metadata,
	 * but don't touch the SLAB redzone.
	 *
	 * The object's freepointer is also left untouched if it is stored
	 * outside the object.
| 2361 | */ |
| 2362 | if (unlikely(init)) { |
| 2363 | int rsize; |
| 2364 | unsigned int inuse, orig_size; |
| 2365 | |
| 2366 | inuse = get_info_end(s); |
| 2367 | orig_size = get_orig_size(s, x); |
| 2368 | if (!kasan_has_integrated_init()) |
| 2369 | memset(kasan_reset_tag(x), 0, orig_size); |
| 2370 | rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; |
| 2371 | memset((char *)kasan_reset_tag(x) + inuse, 0, |
| 2372 | s->size - inuse - rsize); |
| 2373 | /* |
		 * Restore orig_size, otherwise the kmalloc redzone would be
		 * reported as overwritten.
| 2376 | */ |
| 2377 | set_orig_size(s, x, orig_size); |
| 2378 | |
| 2379 | } |
| 2380 | /* KASAN might put x into memory quarantine, delaying its reuse. */ |
| 2381 | return !kasan_slab_free(s, x, init, still_accessible); |
| 2382 | } |
| 2383 | |
| 2384 | static __fastpath_inline |
| 2385 | bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail, |
| 2386 | int *cnt) |
| 2387 | { |
| 2388 | |
| 2389 | void *object; |
| 2390 | void *next = *head; |
| 2391 | void *old_tail = *tail; |
| 2392 | bool init; |
| 2393 | |
| 2394 | if (is_kfence_address(next)) { |
| 2395 | slab_free_hook(s, next, false, false); |
| 2396 | return false; |
| 2397 | } |
| 2398 | |
| 2399 | /* Head and tail of the reconstructed freelist */ |
| 2400 | *head = NULL; |
| 2401 | *tail = NULL; |
| 2402 | |
| 2403 | init = slab_want_init_on_free(s); |
| 2404 | |
| 2405 | do { |
| 2406 | object = next; |
| 2407 | next = get_freepointer(s, object); |
| 2408 | |
| 2409 | /* If object's reuse doesn't have to be delayed */ |
| 2410 | if (likely(slab_free_hook(s, object, init, false))) { |
| 2411 | /* Move object to the new freelist */ |
| 2412 | set_freepointer(s, object, *head); |
| 2413 | *head = object; |
| 2414 | if (!*tail) |
| 2415 | *tail = object; |
| 2416 | } else { |
| 2417 | /* |
| 2418 | * Adjust the reconstructed freelist depth |
| 2419 | * accordingly if object's reuse is delayed. |
| 2420 | */ |
| 2421 | --(*cnt); |
| 2422 | } |
| 2423 | } while (object != old_tail); |
| 2424 | |
| 2425 | return *head != NULL; |
| 2426 | } |
| 2427 | |
| 2428 | static void *setup_object(struct kmem_cache *s, void *object) |
| 2429 | { |
| 2430 | setup_object_debug(s, object); |
| 2431 | object = kasan_init_slab_obj(s, object); |
| 2432 | if (unlikely(s->ctor)) { |
| 2433 | kasan_unpoison_new_object(s, object); |
| 2434 | s->ctor(object); |
| 2435 | kasan_poison_new_object(s, object); |
| 2436 | } |
| 2437 | return object; |
| 2438 | } |
| 2439 | |
| 2440 | /* |
| 2441 | * Slab allocation and freeing |
| 2442 | */ |
| 2443 | static inline struct slab *alloc_slab_page(gfp_t flags, int node, |
| 2444 | struct kmem_cache_order_objects oo) |
| 2445 | { |
| 2446 | struct folio *folio; |
| 2447 | struct slab *slab; |
| 2448 | unsigned int order = oo_order(oo); |
| 2449 | |
| 2450 | if (node == NUMA_NO_NODE) |
| 2451 | folio = (struct folio *)alloc_frozen_pages(flags, order); |
| 2452 | else |
| 2453 | folio = (struct folio *)__alloc_frozen_pages(flags, order, node, NULL); |
| 2454 | |
| 2455 | if (!folio) |
| 2456 | return NULL; |
| 2457 | |
| 2458 | slab = folio_slab(folio); |
| 2459 | __folio_set_slab(folio); |
| 2460 | if (folio_is_pfmemalloc(folio)) |
| 2461 | slab_set_pfmemalloc(slab); |
| 2462 | |
| 2463 | return slab; |
| 2464 | } |
| 2465 | |
| 2466 | #ifdef CONFIG_SLAB_FREELIST_RANDOM |
| 2467 | /* Pre-initialize the random sequence cache */ |
| 2468 | static int init_cache_random_seq(struct kmem_cache *s) |
| 2469 | { |
| 2470 | unsigned int count = oo_objects(s->oo); |
| 2471 | int err; |
| 2472 | |
| 2473 | /* Bailout if already initialised */ |
| 2474 | if (s->random_seq) |
| 2475 | return 0; |
| 2476 | |
| 2477 | err = cache_random_seq_create(s, count, GFP_KERNEL); |
| 2478 | if (err) { |
| 2479 | pr_err("SLUB: Unable to initialize free list for %s\n", |
| 2480 | s->name); |
| 2481 | return err; |
| 2482 | } |
| 2483 | |
| 2484 | /* Transform to an offset on the set of pages */ |
| 2485 | if (s->random_seq) { |
| 2486 | unsigned int i; |
| 2487 | |
| 2488 | for (i = 0; i < count; i++) |
| 2489 | s->random_seq[i] *= s->size; |
| 2490 | } |
| 2491 | return 0; |
| 2492 | } |
| 2493 | |
| 2494 | /* Initialize each random sequence freelist per cache */ |
| 2495 | static void __init init_freelist_randomization(void) |
| 2496 | { |
| 2497 | struct kmem_cache *s; |
| 2498 | |
| 2499 | mutex_lock(&slab_mutex); |
| 2500 | |
| 2501 | list_for_each_entry(s, &slab_caches, list) |
| 2502 | init_cache_random_seq(s); |
| 2503 | |
| 2504 | mutex_unlock(&slab_mutex); |
| 2505 | } |
| 2506 | |
| 2507 | /* Get the next entry on the pre-computed freelist randomized */ |
| 2508 | static void *next_freelist_entry(struct kmem_cache *s, |
| 2509 | unsigned long *pos, void *start, |
| 2510 | unsigned long page_limit, |
| 2511 | unsigned long freelist_count) |
| 2512 | { |
| 2513 | unsigned int idx; |
| 2514 | |
| 2515 | /* |
	 * If the target page allocation failed, the number of objects on the
	 * page might be smaller than the usual number defined by the cache.
| 2518 | */ |
| 2519 | do { |
| 2520 | idx = s->random_seq[*pos]; |
| 2521 | *pos += 1; |
| 2522 | if (*pos >= freelist_count) |
| 2523 | *pos = 0; |
| 2524 | } while (unlikely(idx >= page_limit)); |
| 2525 | |
| 2526 | return (char *)start + idx; |
| 2527 | } |
| 2528 | |
| 2529 | /* Shuffle the single linked freelist based on a random pre-computed sequence */ |
| 2530 | static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) |
| 2531 | { |
| 2532 | void *start; |
| 2533 | void *cur; |
| 2534 | void *next; |
| 2535 | unsigned long idx, pos, page_limit, freelist_count; |
| 2536 | |
| 2537 | if (slab->objects < 2 || !s->random_seq) |
| 2538 | return false; |
| 2539 | |
| 2540 | freelist_count = oo_objects(s->oo); |
| 2541 | pos = get_random_u32_below(freelist_count); |
| 2542 | |
| 2543 | page_limit = slab->objects * s->size; |
| 2544 | start = fixup_red_left(s, slab_address(slab)); |
| 2545 | |
| 2546 | /* First entry is used as the base of the freelist */ |
| 2547 | cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count); |
| 2548 | cur = setup_object(s, cur); |
| 2549 | slab->freelist = cur; |
| 2550 | |
| 2551 | for (idx = 1; idx < slab->objects; idx++) { |
| 2552 | next = next_freelist_entry(s, &pos, start, page_limit, |
| 2553 | freelist_count); |
| 2554 | next = setup_object(s, next); |
| 2555 | set_freepointer(s, cur, next); |
| 2556 | cur = next; |
| 2557 | } |
| 2558 | set_freepointer(s, cur, NULL); |
| 2559 | |
| 2560 | return true; |
| 2561 | } |
| 2562 | #else |
| 2563 | static inline int init_cache_random_seq(struct kmem_cache *s) |
| 2564 | { |
| 2565 | return 0; |
| 2566 | } |
| 2567 | static inline void init_freelist_randomization(void) { } |
| 2568 | static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) |
| 2569 | { |
| 2570 | return false; |
| 2571 | } |
| 2572 | #endif /* CONFIG_SLAB_FREELIST_RANDOM */ |
| 2573 | |
| 2574 | static __always_inline void account_slab(struct slab *slab, int order, |
| 2575 | struct kmem_cache *s, gfp_t gfp) |
| 2576 | { |
| 2577 | if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT)) |
| 2578 | alloc_slab_obj_exts(slab, s, gfp, true); |
| 2579 | |
| 2580 | mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), |
| 2581 | PAGE_SIZE << order); |
| 2582 | } |
| 2583 | |
| 2584 | static __always_inline void unaccount_slab(struct slab *slab, int order, |
| 2585 | struct kmem_cache *s) |
| 2586 | { |
| 2587 | /* |
	 * The slab object extensions should be freed now regardless of whether
	 * mem_alloc_profiling_enabled() is true, because profiling might have
	 * been disabled after slab->obj_exts got allocated.
| 2591 | */ |
| 2592 | free_slab_obj_exts(slab); |
| 2593 | |
| 2594 | mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), |
| 2595 | -(PAGE_SIZE << order)); |
| 2596 | } |
| 2597 | |
| 2598 | static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) |
| 2599 | { |
| 2600 | struct slab *slab; |
| 2601 | struct kmem_cache_order_objects oo = s->oo; |
| 2602 | gfp_t alloc_gfp; |
| 2603 | void *start, *p, *next; |
| 2604 | int idx; |
| 2605 | bool shuffle; |
| 2606 | |
| 2607 | flags &= gfp_allowed_mask; |
| 2608 | |
| 2609 | flags |= s->allocflags; |
| 2610 | |
| 2611 | /* |
| 2612 | * Let the initial higher-order allocation fail under memory pressure |
	 * so we fall back to the minimum order allocation.
| 2614 | */ |
| 2615 | alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; |
| 2616 | if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) |
| 2617 | alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM; |
| 2618 | |
| 2619 | slab = alloc_slab_page(alloc_gfp, node, oo); |
| 2620 | if (unlikely(!slab)) { |
| 2621 | oo = s->min; |
| 2622 | alloc_gfp = flags; |
| 2623 | /* |
| 2624 | * Allocation may have failed due to fragmentation. |
		 * Try a lower order allocation if possible.
| 2626 | */ |
| 2627 | slab = alloc_slab_page(alloc_gfp, node, oo); |
| 2628 | if (unlikely(!slab)) |
| 2629 | return NULL; |
| 2630 | stat(s, ORDER_FALLBACK); |
| 2631 | } |
| 2632 | |
| 2633 | slab->objects = oo_objects(oo); |
| 2634 | slab->inuse = 0; |
| 2635 | slab->frozen = 0; |
| 2636 | init_slab_obj_exts(slab); |
| 2637 | |
| 2638 | account_slab(slab, oo_order(oo), s, flags); |
| 2639 | |
| 2640 | slab->slab_cache = s; |
| 2641 | |
| 2642 | kasan_poison_slab(slab); |
| 2643 | |
| 2644 | start = slab_address(slab); |
| 2645 | |
| 2646 | setup_slab_debug(s, slab, start); |
| 2647 | |
| 2648 | shuffle = shuffle_freelist(s, slab); |
| 2649 | |
| 2650 | if (!shuffle) { |
| 2651 | start = fixup_red_left(s, start); |
| 2652 | start = setup_object(s, start); |
| 2653 | slab->freelist = start; |
| 2654 | for (idx = 0, p = start; idx < slab->objects - 1; idx++) { |
| 2655 | next = p + s->size; |
| 2656 | next = setup_object(s, next); |
| 2657 | set_freepointer(s, p, next); |
| 2658 | p = next; |
| 2659 | } |
| 2660 | set_freepointer(s, p, NULL); |
| 2661 | } |
| 2662 | |
| 2663 | return slab; |
| 2664 | } |
| 2665 | |
| 2666 | static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node) |
| 2667 | { |
| 2668 | if (unlikely(flags & GFP_SLAB_BUG_MASK)) |
| 2669 | flags = kmalloc_fix_flags(flags); |
| 2670 | |
| 2671 | WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); |
| 2672 | |
| 2673 | return allocate_slab(s, |
| 2674 | flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); |
| 2675 | } |
| 2676 | |
| 2677 | static void __free_slab(struct kmem_cache *s, struct slab *slab) |
| 2678 | { |
| 2679 | struct folio *folio = slab_folio(slab); |
| 2680 | int order = folio_order(folio); |
| 2681 | int pages = 1 << order; |
| 2682 | |
| 2683 | __slab_clear_pfmemalloc(slab); |
| 2684 | folio->mapping = NULL; |
| 2685 | __folio_clear_slab(folio); |
| 2686 | mm_account_reclaimed_pages(pages); |
| 2687 | unaccount_slab(slab, order, s); |
| 2688 | free_frozen_pages(&folio->page, order); |
| 2689 | } |
| 2690 | |
| 2691 | static void rcu_free_slab(struct rcu_head *h) |
| 2692 | { |
| 2693 | struct slab *slab = container_of(h, struct slab, rcu_head); |
| 2694 | |
| 2695 | __free_slab(slab->slab_cache, slab); |
| 2696 | } |
| 2697 | |
| 2698 | static void free_slab(struct kmem_cache *s, struct slab *slab) |
| 2699 | { |
| 2700 | if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { |
| 2701 | void *p; |
| 2702 | |
| 2703 | slab_pad_check(s, slab); |
| 2704 | for_each_object(p, s, slab_address(slab), slab->objects) |
| 2705 | check_object(s, slab, p, SLUB_RED_INACTIVE); |
| 2706 | } |
| 2707 | |
| 2708 | if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) |
| 2709 | call_rcu(&slab->rcu_head, rcu_free_slab); |
| 2710 | else |
| 2711 | __free_slab(s, slab); |
| 2712 | } |
| 2713 | |
| 2714 | static void discard_slab(struct kmem_cache *s, struct slab *slab) |
| 2715 | { |
| 2716 | dec_slabs_node(s, slab_nid(slab), slab->objects); |
| 2717 | free_slab(s, slab); |
| 2718 | } |
| 2719 | |
| 2720 | /* |
 * SLUB reuses the PG_workingset bit to keep track of whether a slab
 * is on the per-node partial list.
| 2723 | */ |
| 2724 | static inline bool slab_test_node_partial(const struct slab *slab) |
| 2725 | { |
| 2726 | return folio_test_workingset(slab_folio(slab)); |
| 2727 | } |
| 2728 | |
| 2729 | static inline void slab_set_node_partial(struct slab *slab) |
| 2730 | { |
| 2731 | set_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); |
| 2732 | } |
| 2733 | |
| 2734 | static inline void slab_clear_node_partial(struct slab *slab) |
| 2735 | { |
| 2736 | clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); |
| 2737 | } |
| 2738 | |
| 2739 | /* |
| 2740 | * Management of partially allocated slabs. |
| 2741 | */ |
| 2742 | static inline void |
| 2743 | __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail) |
| 2744 | { |
| 2745 | n->nr_partial++; |
| 2746 | if (tail == DEACTIVATE_TO_TAIL) |
| 2747 | list_add_tail(&slab->slab_list, &n->partial); |
| 2748 | else |
| 2749 | list_add(&slab->slab_list, &n->partial); |
| 2750 | slab_set_node_partial(slab); |
| 2751 | } |
| 2752 | |
| 2753 | static inline void add_partial(struct kmem_cache_node *n, |
| 2754 | struct slab *slab, int tail) |
| 2755 | { |
| 2756 | lockdep_assert_held(&n->list_lock); |
| 2757 | __add_partial(n, slab, tail); |
| 2758 | } |
| 2759 | |
| 2760 | static inline void remove_partial(struct kmem_cache_node *n, |
| 2761 | struct slab *slab) |
| 2762 | { |
| 2763 | lockdep_assert_held(&n->list_lock); |
| 2764 | list_del(&slab->slab_list); |
| 2765 | slab_clear_node_partial(slab); |
| 2766 | n->nr_partial--; |
| 2767 | } |
| 2768 | |
| 2769 | /* |
 * Called only for kmem_cache_debug() caches, instead of remove_partial(), with
 * a slab from the n->partial list. Remove only a single object from the slab,
 * do the alloc_debug_processing() checks and leave the slab on the list, or
 * move it to the full list if it was the last free object.
| 2774 | */ |
| 2775 | static void *alloc_single_from_partial(struct kmem_cache *s, |
| 2776 | struct kmem_cache_node *n, struct slab *slab, int orig_size) |
| 2777 | { |
| 2778 | void *object; |
| 2779 | |
| 2780 | lockdep_assert_held(&n->list_lock); |
| 2781 | |
| 2782 | object = slab->freelist; |
| 2783 | slab->freelist = get_freepointer(s, object); |
| 2784 | slab->inuse++; |
| 2785 | |
| 2786 | if (!alloc_debug_processing(s, slab, object, orig_size)) { |
| 2787 | if (folio_test_slab(slab_folio(slab))) |
| 2788 | remove_partial(n, slab); |
| 2789 | return NULL; |
| 2790 | } |
| 2791 | |
| 2792 | if (slab->inuse == slab->objects) { |
| 2793 | remove_partial(n, slab); |
| 2794 | add_full(s, n, slab); |
| 2795 | } |
| 2796 | |
| 2797 | return object; |
| 2798 | } |
| 2799 | |
| 2800 | /* |
| 2801 | * Called only for kmem_cache_debug() caches to allocate from a freshly |
| 2802 | * allocated slab. Allocate a single object instead of whole freelist |
| 2803 | * and put the slab to the partial (or full) list. |
| 2804 | */ |
| 2805 | static void *alloc_single_from_new_slab(struct kmem_cache *s, |
| 2806 | struct slab *slab, int orig_size) |
| 2807 | { |
| 2808 | int nid = slab_nid(slab); |
| 2809 | struct kmem_cache_node *n = get_node(s, nid); |
| 2810 | unsigned long flags; |
| 2811 | void *object; |
| 2812 | |
| 2813 | |
| 2814 | object = slab->freelist; |
| 2815 | slab->freelist = get_freepointer(s, object); |
| 2816 | slab->inuse = 1; |
| 2817 | |
| 2818 | if (!alloc_debug_processing(s, slab, object, orig_size)) |
| 2819 | /* |
		 * It's not really expected that this would fail on a
		 * freshly allocated slab, but concurrent memory
		 * corruption could in theory cause it.
| 2823 | */ |
| 2824 | return NULL; |
| 2825 | |
| 2826 | spin_lock_irqsave(&n->list_lock, flags); |
| 2827 | |
| 2828 | if (slab->inuse == slab->objects) |
| 2829 | add_full(s, n, slab); |
| 2830 | else |
| 2831 | add_partial(n, slab, DEACTIVATE_TO_HEAD); |
| 2832 | |
| 2833 | inc_slabs_node(s, nid, slab->objects); |
| 2834 | spin_unlock_irqrestore(&n->list_lock, flags); |
| 2835 | |
| 2836 | return object; |
| 2837 | } |
| 2838 | |
| 2839 | #ifdef CONFIG_SLUB_CPU_PARTIAL |
| 2840 | static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain); |
| 2841 | #else |
| 2842 | static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab, |
| 2843 | int drain) { } |
| 2844 | #endif |
| 2845 | static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags); |
| 2846 | |
| 2847 | /* |
| 2848 | * Try to allocate a partial slab from a specific node. |
| 2849 | */ |
| 2850 | static struct slab *get_partial_node(struct kmem_cache *s, |
| 2851 | struct kmem_cache_node *n, |
| 2852 | struct partial_context *pc) |
| 2853 | { |
| 2854 | struct slab *slab, *slab2, *partial = NULL; |
| 2855 | unsigned long flags; |
| 2856 | unsigned int partial_slabs = 0; |
| 2857 | |
| 2858 | /* |
| 2859 | * Racy check. If we mistakenly see no partial slabs then we |
| 2860 | * just allocate an empty slab. If we mistakenly try to get a |
| 2861 | * partial slab and there is none available then get_partial() |
| 2862 | * will return NULL. |
| 2863 | */ |
| 2864 | if (!n || !n->nr_partial) |
| 2865 | return NULL; |
| 2866 | |
| 2867 | spin_lock_irqsave(&n->list_lock, flags); |
| 2868 | list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) { |
| 2869 | if (!pfmemalloc_match(slab, pc->flags)) |
| 2870 | continue; |
| 2871 | |
| 2872 | if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { |
| 2873 | void *object = alloc_single_from_partial(s, n, slab, |
| 2874 | pc->orig_size); |
| 2875 | if (object) { |
| 2876 | partial = slab; |
| 2877 | pc->object = object; |
| 2878 | break; |
| 2879 | } |
| 2880 | continue; |
| 2881 | } |
| 2882 | |
| 2883 | remove_partial(n, slab); |
| 2884 | |
| 2885 | if (!partial) { |
| 2886 | partial = slab; |
| 2887 | stat(s, ALLOC_FROM_PARTIAL); |
| 2888 | |
| 2889 | if ((slub_get_cpu_partial(s) == 0)) { |
| 2890 | break; |
| 2891 | } |
| 2892 | } else { |
| 2893 | put_cpu_partial(s, slab, 0); |
| 2894 | stat(s, CPU_PARTIAL_NODE); |
| 2895 | |
| 2896 | if (++partial_slabs > slub_get_cpu_partial(s) / 2) { |
| 2897 | break; |
| 2898 | } |
| 2899 | } |
| 2900 | } |
| 2901 | spin_unlock_irqrestore(&n->list_lock, flags); |
| 2902 | return partial; |
| 2903 | } |
| 2904 | |
| 2905 | /* |
| 2906 | * Get a slab from somewhere. Search in increasing NUMA distances. |
| 2907 | */ |
| 2908 | static struct slab *get_any_partial(struct kmem_cache *s, |
| 2909 | struct partial_context *pc) |
| 2910 | { |
| 2911 | #ifdef CONFIG_NUMA |
| 2912 | struct zonelist *zonelist; |
| 2913 | struct zoneref *z; |
| 2914 | struct zone *zone; |
| 2915 | enum zone_type highest_zoneidx = gfp_zone(pc->flags); |
| 2916 | struct slab *slab; |
| 2917 | unsigned int cpuset_mems_cookie; |
| 2918 | |
| 2919 | /* |
| 2920 | * The defrag ratio allows a configuration of the tradeoffs between |
| 2921 | * inter node defragmentation and node local allocations. A lower |
| 2922 | * defrag_ratio increases the tendency to do local allocations |
| 2923 | * instead of attempting to obtain partial slabs from other nodes. |
| 2924 | * |
| 2925 | * If the defrag_ratio is set to 0 then kmalloc() always |
| 2926 | * returns node local objects. If the ratio is higher then kmalloc() |
| 2927 | * may return off node objects because partial slabs are obtained |
| 2928 | * from other nodes and filled up. |
| 2929 | * |
| 2930 | * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 |
| 2931 | * (which makes defrag_ratio = 1000) then every (well almost) |
| 2932 | * allocation will first attempt to defrag slab caches on other nodes. |
| 2933 | * This means scanning over all nodes to look for partial slabs which |
| 2934 | * may be expensive if we do it every time we are trying to find a slab |
| 2935 | * with available objects. |
| 2936 | */ |
| 2937 | if (!s->remote_node_defrag_ratio || |
| 2938 | get_cycles() % 1024 > s->remote_node_defrag_ratio) |
| 2939 | return NULL; |
| 2940 | |
| 2941 | do { |
| 2942 | cpuset_mems_cookie = read_mems_allowed_begin(); |
| 2943 | zonelist = node_zonelist(mempolicy_slab_node(), pc->flags); |
| 2944 | for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { |
| 2945 | struct kmem_cache_node *n; |
| 2946 | |
| 2947 | n = get_node(s, zone_to_nid(zone)); |
| 2948 | |
| 2949 | if (n && cpuset_zone_allowed(zone, pc->flags) && |
| 2950 | n->nr_partial > s->min_partial) { |
| 2951 | slab = get_partial_node(s, n, pc); |
| 2952 | if (slab) { |
| 2953 | /* |
| 2954 | * Don't check read_mems_allowed_retry() |
| 2955 | * here - if mems_allowed was updated in |
| 2956 | * parallel, that was a harmless race |
| 2957 | * between allocation and the cpuset |
| 2958 | * update |
| 2959 | */ |
| 2960 | return slab; |
| 2961 | } |
| 2962 | } |
| 2963 | } |
| 2964 | } while (read_mems_allowed_retry(cpuset_mems_cookie)); |
| 2965 | #endif /* CONFIG_NUMA */ |
| 2966 | return NULL; |
| 2967 | } |
| 2968 | |
| 2969 | /* |
| 2970 | * Get a partial slab, lock it and return it. |
| 2971 | */ |
| 2972 | static struct slab *get_partial(struct kmem_cache *s, int node, |
| 2973 | struct partial_context *pc) |
| 2974 | { |
| 2975 | struct slab *slab; |
| 2976 | int searchnode = node; |
| 2977 | |
| 2978 | if (node == NUMA_NO_NODE) |
| 2979 | searchnode = numa_mem_id(); |
| 2980 | |
| 2981 | slab = get_partial_node(s, get_node(s, searchnode), pc); |
| 2982 | if (slab || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE))) |
| 2983 | return slab; |
| 2984 | |
| 2985 | return get_any_partial(s, pc); |
| 2986 | } |
| 2987 | |
| 2988 | #ifndef CONFIG_SLUB_TINY |
| 2989 | |
| 2990 | #ifdef CONFIG_PREEMPTION |
| 2991 | /* |
| 2992 | * Calculate the next globally unique transaction for disambiguation |
 * during cmpxchg. The transactions start with the cpu number and are then
 * incremented by TID_STEP (CONFIG_NR_CPUS rounded up to a power of two).
| 2995 | */ |
| 2996 | #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) |
| 2997 | #else |
| 2998 | /* |
 * No preemption is supported, therefore there is also no need to check
 * for different cpus.
| 3001 | */ |
| 3002 | #define TID_STEP 1 |
| 3003 | #endif /* CONFIG_PREEMPTION */ |
| 3004 | |
| 3005 | static inline unsigned long next_tid(unsigned long tid) |
| 3006 | { |
| 3007 | return tid + TID_STEP; |
| 3008 | } |
| 3009 | |
| 3010 | #ifdef SLUB_DEBUG_CMPXCHG |
| 3011 | static inline unsigned int tid_to_cpu(unsigned long tid) |
| 3012 | { |
| 3013 | return tid % TID_STEP; |
| 3014 | } |
| 3015 | |
| 3016 | static inline unsigned long tid_to_event(unsigned long tid) |
| 3017 | { |
| 3018 | return tid / TID_STEP; |
| 3019 | } |
| 3020 | #endif |
| 3021 | |
| 3022 | static inline unsigned int init_tid(int cpu) |
| 3023 | { |
| 3024 | return cpu; |
| 3025 | } |
| 3026 | |
| 3027 | static inline void note_cmpxchg_failure(const char *n, |
| 3028 | const struct kmem_cache *s, unsigned long tid) |
| 3029 | { |
| 3030 | #ifdef SLUB_DEBUG_CMPXCHG |
| 3031 | unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); |
| 3032 | |
| 3033 | pr_info("%s %s: cmpxchg redo ", n, s->name); |
| 3034 | |
| 3035 | #ifdef CONFIG_PREEMPTION |
| 3036 | if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) |
| 3037 | pr_warn("due to cpu change %d -> %d\n", |
| 3038 | tid_to_cpu(tid), tid_to_cpu(actual_tid)); |
| 3039 | else |
| 3040 | #endif |
| 3041 | if (tid_to_event(tid) != tid_to_event(actual_tid)) |
| 3042 | pr_warn("due to cpu running other code. Event %ld->%ld\n", |
| 3043 | tid_to_event(tid), tid_to_event(actual_tid)); |
| 3044 | else |
| 3045 | pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n", |
| 3046 | actual_tid, tid, next_tid(tid)); |
| 3047 | #endif |
| 3048 | stat(s, CMPXCHG_DOUBLE_CPU_FAIL); |
| 3049 | } |
| 3050 | |
| 3051 | static void init_kmem_cache_cpus(struct kmem_cache *s) |
| 3052 | { |
| 3053 | int cpu; |
| 3054 | struct kmem_cache_cpu *c; |
| 3055 | |
| 3056 | for_each_possible_cpu(cpu) { |
| 3057 | c = per_cpu_ptr(s->cpu_slab, cpu); |
| 3058 | local_lock_init(&c->lock); |
| 3059 | c->tid = init_tid(cpu); |
| 3060 | } |
| 3061 | } |
| 3062 | |
| 3063 | /* |
 * Finishes removing the cpu slab. Merges the cpu's freelist with the slab's
 * freelist, unfreezes the slab and puts it on the proper list.
 * Assumes the slab has already been safely taken away from kmem_cache_cpu
 * by the caller.
| 3068 | */ |
| 3069 | static void deactivate_slab(struct kmem_cache *s, struct slab *slab, |
| 3070 | void *freelist) |
| 3071 | { |
| 3072 | struct kmem_cache_node *n = get_node(s, slab_nid(slab)); |
| 3073 | int free_delta = 0; |
| 3074 | void *nextfree, *freelist_iter, *freelist_tail; |
| 3075 | int tail = DEACTIVATE_TO_HEAD; |
| 3076 | unsigned long flags = 0; |
| 3077 | struct slab new; |
| 3078 | struct slab old; |
| 3079 | |
| 3080 | if (READ_ONCE(slab->freelist)) { |
| 3081 | stat(s, DEACTIVATE_REMOTE_FREES); |
| 3082 | tail = DEACTIVATE_TO_TAIL; |
| 3083 | } |
| 3084 | |
| 3085 | /* |
| 3086 | * Stage one: Count the objects on cpu's freelist as free_delta and |
| 3087 | * remember the last object in freelist_tail for later splicing. |
| 3088 | */ |
| 3089 | freelist_tail = NULL; |
| 3090 | freelist_iter = freelist; |
| 3091 | while (freelist_iter) { |
| 3092 | nextfree = get_freepointer(s, freelist_iter); |
| 3093 | |
| 3094 | /* |
| 3095 | * If 'nextfree' is invalid, it is possible that the object at |
| 3096 | * 'freelist_iter' is already corrupted. So isolate all objects |
| 3097 | * starting at 'freelist_iter' by skipping them. |
| 3098 | */ |
| 3099 | if (freelist_corrupted(s, slab, &freelist_iter, nextfree)) |
| 3100 | break; |
| 3101 | |
| 3102 | freelist_tail = freelist_iter; |
| 3103 | free_delta++; |
| 3104 | |
| 3105 | freelist_iter = nextfree; |
| 3106 | } |
| 3107 | |
| 3108 | /* |
| 3109 | * Stage two: Unfreeze the slab while splicing the per-cpu |
	 * freelist to the head of the slab's freelist.
| 3111 | */ |
| 3112 | do { |
| 3113 | old.freelist = READ_ONCE(slab->freelist); |
| 3114 | old.counters = READ_ONCE(slab->counters); |
| 3115 | VM_BUG_ON(!old.frozen); |
| 3116 | |
| 3117 | /* Determine target state of the slab */ |
| 3118 | new.counters = old.counters; |
| 3119 | new.frozen = 0; |
| 3120 | if (freelist_tail) { |
| 3121 | new.inuse -= free_delta; |
| 3122 | set_freepointer(s, freelist_tail, old.freelist); |
| 3123 | new.freelist = freelist; |
| 3124 | } else { |
| 3125 | new.freelist = old.freelist; |
| 3126 | } |
| 3127 | } while (!slab_update_freelist(s, slab, |
| 3128 | old.freelist, old.counters, |
| 3129 | new.freelist, new.counters, |
| 3130 | "unfreezing slab")); |
| 3131 | |
| 3132 | /* |
| 3133 | * Stage three: Manipulate the slab list based on the updated state. |
| 3134 | */ |
| 3135 | if (!new.inuse && n->nr_partial >= s->min_partial) { |
| 3136 | stat(s, DEACTIVATE_EMPTY); |
| 3137 | discard_slab(s, slab); |
| 3138 | stat(s, FREE_SLAB); |
| 3139 | } else if (new.freelist) { |
| 3140 | spin_lock_irqsave(&n->list_lock, flags); |
| 3141 | add_partial(n, slab, tail); |
| 3142 | spin_unlock_irqrestore(&n->list_lock, flags); |
| 3143 | stat(s, tail); |
| 3144 | } else { |
| 3145 | stat(s, DEACTIVATE_FULL); |
| 3146 | } |
| 3147 | } |
| 3148 | |
| 3149 | #ifdef CONFIG_SLUB_CPU_PARTIAL |
| 3150 | static void __put_partials(struct kmem_cache *s, struct slab *partial_slab) |
| 3151 | { |
| 3152 | struct kmem_cache_node *n = NULL, *n2 = NULL; |
| 3153 | struct slab *slab, *slab_to_discard = NULL; |
| 3154 | unsigned long flags = 0; |
| 3155 | |
| 3156 | while (partial_slab) { |
| 3157 | slab = partial_slab; |
| 3158 | partial_slab = slab->next; |
| 3159 | |
| 3160 | n2 = get_node(s, slab_nid(slab)); |
| 3161 | if (n != n2) { |
| 3162 | if (n) |
| 3163 | spin_unlock_irqrestore(&n->list_lock, flags); |
| 3164 | |
| 3165 | n = n2; |
| 3166 | spin_lock_irqsave(&n->list_lock, flags); |
| 3167 | } |
| 3168 | |
| 3169 | if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) { |
| 3170 | slab->next = slab_to_discard; |
| 3171 | slab_to_discard = slab; |
| 3172 | } else { |
| 3173 | add_partial(n, slab, DEACTIVATE_TO_TAIL); |
| 3174 | stat(s, FREE_ADD_PARTIAL); |
| 3175 | } |
| 3176 | } |
| 3177 | |
| 3178 | if (n) |
| 3179 | spin_unlock_irqrestore(&n->list_lock, flags); |
| 3180 | |
| 3181 | while (slab_to_discard) { |
| 3182 | slab = slab_to_discard; |
| 3183 | slab_to_discard = slab_to_discard->next; |
| 3184 | |
| 3185 | stat(s, DEACTIVATE_EMPTY); |
| 3186 | discard_slab(s, slab); |
| 3187 | stat(s, FREE_SLAB); |
| 3188 | } |
| 3189 | } |
| 3190 | |
| 3191 | /* |
| 3192 | * Move all the cpu partial slabs to the node partial list. |
| 3193 | */ |
| 3194 | static void put_partials(struct kmem_cache *s) |
| 3195 | { |
| 3196 | struct slab *partial_slab; |
| 3197 | unsigned long flags; |
| 3198 | |
| 3199 | local_lock_irqsave(&s->cpu_slab->lock, flags); |
| 3200 | partial_slab = this_cpu_read(s->cpu_slab->partial); |
| 3201 | this_cpu_write(s->cpu_slab->partial, NULL); |
| 3202 | local_unlock_irqrestore(&s->cpu_slab->lock, flags); |
| 3203 | |
| 3204 | if (partial_slab) |
| 3205 | __put_partials(s, partial_slab); |
| 3206 | } |
| 3207 | |
| 3208 | static void put_partials_cpu(struct kmem_cache *s, |
| 3209 | struct kmem_cache_cpu *c) |
| 3210 | { |
| 3211 | struct slab *partial_slab; |
| 3212 | |
| 3213 | partial_slab = slub_percpu_partial(c); |
| 3214 | c->partial = NULL; |
| 3215 | |
| 3216 | if (partial_slab) |
| 3217 | __put_partials(s, partial_slab); |
| 3218 | } |
| 3219 | |
| 3220 | /* |
| 3221 | * Put a slab onto the cpu partial list if there is room. |
| 3222 | * |
| 3223 | * If the list is already full, first move all of its slabs to the |
| 3224 | * per node partial list, then add the new slab. |
| 3225 | */ |
| 3226 | static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain) |
| 3227 | { |
| 3228 | struct slab *oldslab; |
| 3229 | struct slab *slab_to_put = NULL; |
| 3230 | unsigned long flags; |
| 3231 | int slabs = 0; |
| 3232 | |
| 3233 | local_lock_irqsave(&s->cpu_slab->lock, flags); |
| 3234 | |
| 3235 | oldslab = this_cpu_read(s->cpu_slab->partial); |
| 3236 | |
| 3237 | if (oldslab) { |
| 3238 | if (drain && oldslab->slabs >= s->cpu_partial_slabs) { |
| 3239 | /* |
| 3240 | * Partial array is full. Move the existing set to the |
| 3241 | * per node partial list. Postpone the actual move until |
| 3242 | * after the critical section. |
| 3243 | */ |
| 3244 | slab_to_put = oldslab; |
| 3245 | oldslab = NULL; |
| 3246 | } else { |
| 3247 | slabs = oldslab->slabs; |
| 3248 | } |
| 3249 | } |
| 3250 | |
| 3251 | slabs++; |
| 3252 | |
| 3253 | slab->slabs = slabs; |
| 3254 | slab->next = oldslab; |
| 3255 | |
| 3256 | this_cpu_write(s->cpu_slab->partial, slab); |
| 3257 | |
| 3258 | local_unlock_irqrestore(&s->cpu_slab->lock, flags); |
| 3259 | |
| 3260 | if (slab_to_put) { |
| 3261 | __put_partials(s, slab_to_put); |
| 3262 | stat(s, CPU_PARTIAL_DRAIN); |
| 3263 | } |
| 3264 | } |
| 3265 | |
| 3266 | #else /* CONFIG_SLUB_CPU_PARTIAL */ |
| 3267 | |
| 3268 | static inline void put_partials(struct kmem_cache *s) { } |
| 3269 | static inline void put_partials_cpu(struct kmem_cache *s, |
| 3270 | struct kmem_cache_cpu *c) { } |
| 3271 | |
| 3272 | #endif /* CONFIG_SLUB_CPU_PARTIAL */ |
| 3273 | |
| 3274 | static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) |
| 3275 | { |
| 3276 | unsigned long flags; |
| 3277 | struct slab *slab; |
| 3278 | void *freelist; |
| 3279 | |
| 3280 | local_lock_irqsave(&s->cpu_slab->lock, flags); |
| 3281 | |
| 3282 | slab = c->slab; |
| 3283 | freelist = c->freelist; |
| 3284 | |
| 3285 | c->slab = NULL; |
| 3286 | c->freelist = NULL; |
| 3287 | c->tid = next_tid(c->tid); |
| 3288 | |
| 3289 | local_unlock_irqrestore(&s->cpu_slab->lock, flags); |
| 3290 | |
| 3291 | if (slab) { |
| 3292 | deactivate_slab(s, slab, freelist); |
| 3293 | stat(s, CPUSLAB_FLUSH); |
| 3294 | } |
| 3295 | } |
| 3296 | |
| 3297 | static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) |
| 3298 | { |
| 3299 | struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); |
| 3300 | void *freelist = c->freelist; |
| 3301 | struct slab *slab = c->slab; |
| 3302 | |
| 3303 | c->slab = NULL; |
| 3304 | c->freelist = NULL; |
| 3305 | c->tid = next_tid(c->tid); |
| 3306 | |
| 3307 | if (slab) { |
| 3308 | deactivate_slab(s, slab, freelist); |
| 3309 | stat(s, CPUSLAB_FLUSH); |
| 3310 | } |
| 3311 | |
| 3312 | put_partials_cpu(s, c); |
| 3313 | } |
| 3314 | |
| 3315 | struct slub_flush_work { |
| 3316 | struct work_struct work; |
| 3317 | struct kmem_cache *s; |
| 3318 | bool skip; |
| 3319 | }; |
| 3320 | |
| 3321 | /* |
| 3322 | * Flush cpu slab. |
| 3323 | * |
| 3324 | * Called from CPU work handler with migration disabled. |
| 3325 | */ |
| 3326 | static void flush_cpu_slab(struct work_struct *w) |
| 3327 | { |
| 3328 | struct kmem_cache *s; |
| 3329 | struct kmem_cache_cpu *c; |
| 3330 | struct slub_flush_work *sfw; |
| 3331 | |
| 3332 | sfw = container_of(w, struct slub_flush_work, work); |
| 3333 | |
| 3334 | s = sfw->s; |
| 3335 | c = this_cpu_ptr(s->cpu_slab); |
| 3336 | |
| 3337 | if (c->slab) |
| 3338 | flush_slab(s, c); |
| 3339 | |
| 3340 | put_partials(s); |
| 3341 | } |
| 3342 | |
| 3343 | static bool has_cpu_slab(int cpu, struct kmem_cache *s) |
| 3344 | { |
| 3345 | struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); |
| 3346 | |
| 3347 | return c->slab || slub_percpu_partial(c); |
| 3348 | } |
| 3349 | |
| 3350 | static DEFINE_MUTEX(flush_lock); |
| 3351 | static DEFINE_PER_CPU(struct slub_flush_work, slub_flush); |
| 3352 | |
| 3353 | static void flush_all_cpus_locked(struct kmem_cache *s) |
| 3354 | { |
| 3355 | struct slub_flush_work *sfw; |
| 3356 | unsigned int cpu; |
| 3357 | |
| 3358 | lockdep_assert_cpus_held(); |
| 3359 | mutex_lock(&flush_lock); |
| 3360 | |
| 3361 | for_each_online_cpu(cpu) { |
| 3362 | sfw = &per_cpu(slub_flush, cpu); |
| 3363 | if (!has_cpu_slab(cpu, s)) { |
| 3364 | sfw->skip = true; |
| 3365 | continue; |
| 3366 | } |
| 3367 | INIT_WORK(&sfw->work, flush_cpu_slab); |
| 3368 | sfw->skip = false; |
| 3369 | sfw->s = s; |
| 3370 | queue_work_on(cpu, flushwq, &sfw->work); |
| 3371 | } |
| 3372 | |
| 3373 | for_each_online_cpu(cpu) { |
| 3374 | sfw = &per_cpu(slub_flush, cpu); |
| 3375 | if (sfw->skip) |
| 3376 | continue; |
| 3377 | flush_work(&sfw->work); |
| 3378 | } |
| 3379 | |
| 3380 | mutex_unlock(&flush_lock); |
| 3381 | } |
| 3382 | |
| 3383 | static void flush_all(struct kmem_cache *s) |
| 3384 | { |
| 3385 | cpus_read_lock(); |
| 3386 | flush_all_cpus_locked(s); |
| 3387 | cpus_read_unlock(); |
| 3388 | } |
| 3389 | |
| 3390 | /* |
| 3391 | * Use the cpu notifier to ensure that the cpu slabs are flushed when |
| 3392 | * necessary. |
| 3393 | */ |
| 3394 | static int slub_cpu_dead(unsigned int cpu) |
| 3395 | { |
| 3396 | struct kmem_cache *s; |
| 3397 | |
| 3398 | mutex_lock(&slab_mutex); |
| 3399 | list_for_each_entry(s, &slab_caches, list) |
| 3400 | __flush_cpu_slab(s, cpu); |
| 3401 | mutex_unlock(&slab_mutex); |
| 3402 | return 0; |
| 3403 | } |
| 3404 | |
| 3405 | #else /* CONFIG_SLUB_TINY */ |
| 3406 | static inline void flush_all_cpus_locked(struct kmem_cache *s) { } |
| 3407 | static inline void flush_all(struct kmem_cache *s) { } |
| 3408 | static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { } |
| 3409 | static inline int slub_cpu_dead(unsigned int cpu) { return 0; } |
| 3410 | #endif /* CONFIG_SLUB_TINY */ |
| 3411 | |
| 3412 | /* |
| 3413 | * Check if the objects in a per cpu structure fit NUMA |
| 3414 | * locality expectations. |
| 3415 | */ |
| 3416 | static inline int node_match(struct slab *slab, int node) |
| 3417 | { |
| 3418 | #ifdef CONFIG_NUMA |
| 3419 | if (node != NUMA_NO_NODE && slab_nid(slab) != node) |
| 3420 | return 0; |
| 3421 | #endif |
| 3422 | return 1; |
| 3423 | } |
| 3424 | |
| 3425 | #ifdef CONFIG_SLUB_DEBUG |
| 3426 | static int count_free(struct slab *slab) |
| 3427 | { |
| 3428 | return slab->objects - slab->inuse; |
| 3429 | } |
| 3430 | |
| 3431 | static inline unsigned long node_nr_objs(struct kmem_cache_node *n) |
| 3432 | { |
| 3433 | return atomic_long_read(&n->total_objects); |
| 3434 | } |
| 3435 | |
| 3436 | /* Supports checking bulk free of a constructed freelist */ |
| 3437 | static inline bool free_debug_processing(struct kmem_cache *s, |
| 3438 | struct slab *slab, void *head, void *tail, int *bulk_cnt, |
| 3439 | unsigned long addr, depot_stack_handle_t handle) |
| 3440 | { |
| 3441 | bool checks_ok = false; |
| 3442 | void *object = head; |
| 3443 | int cnt = 0; |
| 3444 | |
| 3445 | if (s->flags & SLAB_CONSISTENCY_CHECKS) { |
| 3446 | if (!check_slab(s, slab)) |
| 3447 | goto out; |
| 3448 | } |
| 3449 | |
| 3450 | if (slab->inuse < *bulk_cnt) { |
| 3451 | slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n", |
| 3452 | slab->inuse, *bulk_cnt); |
| 3453 | goto out; |
| 3454 | } |
| 3455 | |
| 3456 | next_object: |
| 3457 | |
| 3458 | if (++cnt > *bulk_cnt) |
| 3459 | goto out_cnt; |
| 3460 | |
| 3461 | if (s->flags & SLAB_CONSISTENCY_CHECKS) { |
| 3462 | if (!free_consistency_checks(s, slab, object, addr)) |
| 3463 | goto out; |
| 3464 | } |
| 3465 | |
| 3466 | if (s->flags & SLAB_STORE_USER) |
| 3467 | set_track_update(s, object, TRACK_FREE, addr, handle); |
| 3468 | trace(s, slab, object, 0); |
| 3469 | /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ |
| 3470 | init_object(s, object, SLUB_RED_INACTIVE); |
| 3471 | |
| 3472 | /* Reached end of constructed freelist yet? */ |
| 3473 | if (object != tail) { |
| 3474 | object = get_freepointer(s, object); |
| 3475 | goto next_object; |
| 3476 | } |
| 3477 | checks_ok = true; |
| 3478 | |
| 3479 | out_cnt: |
| 3480 | if (cnt != *bulk_cnt) { |
| 3481 | slab_err(s, slab, "Bulk free expected %d objects but found %d\n", |
| 3482 | *bulk_cnt, cnt); |
| 3483 | *bulk_cnt = cnt; |
| 3484 | } |
| 3485 | |
| 3486 | out: |
| 3487 | |
| 3488 | if (!checks_ok) |
| 3489 | slab_fix(s, "Object at 0x%p not freed", object); |
| 3490 | |
| 3491 | return checks_ok; |
| 3492 | } |
| 3493 | #endif /* CONFIG_SLUB_DEBUG */ |
| 3494 | |
| 3495 | #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS) |
| 3496 | static unsigned long count_partial(struct kmem_cache_node *n, |
| 3497 | int (*get_count)(struct slab *)) |
| 3498 | { |
| 3499 | unsigned long flags; |
| 3500 | unsigned long x = 0; |
| 3501 | struct slab *slab; |
| 3502 | |
| 3503 | spin_lock_irqsave(&n->list_lock, flags); |
| 3504 | list_for_each_entry(slab, &n->partial, slab_list) |
| 3505 | x += get_count(slab); |
| 3506 | spin_unlock_irqrestore(&n->list_lock, flags); |
| 3507 | return x; |
| 3508 | } |
| 3509 | #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */ |
| 3510 | |
| 3511 | #ifdef CONFIG_SLUB_DEBUG |
| 3512 | #define MAX_PARTIAL_TO_SCAN 10000 |
| 3513 | |
| 3514 | static unsigned long count_partial_free_approx(struct kmem_cache_node *n) |
| 3515 | { |
| 3516 | unsigned long flags; |
| 3517 | unsigned long x = 0; |
| 3518 | struct slab *slab; |
| 3519 | |
| 3520 | spin_lock_irqsave(&n->list_lock, flags); |
| 3521 | if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) { |
| 3522 | list_for_each_entry(slab, &n->partial, slab_list) |
| 3523 | x += slab->objects - slab->inuse; |
| 3524 | } else { |
| 3525 | /* |
| 3526 | * For a long list, approximate the total count of objects in |
| 3527 | * it to meet the limit on the number of slabs to scan. |
| 3528 | * Scan from both the list's head and tail for better accuracy. |
| 3529 | */ |
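| | /* |
| | * Worked example (illustrative numbers only): if the node has 40000 |
| | * partial slabs and the MAX_PARTIAL_TO_SCAN slabs sampled below hold |
| | * 8000 free objects, the estimate is 8000 * 40000 / 10000 = 32000, |
| | * clamped to the node's total object count. |
| | */ |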
| 3530 | unsigned long scanned = 0; |
| 3531 | |
| 3532 | list_for_each_entry(slab, &n->partial, slab_list) { |
| 3533 | x += slab->objects - slab->inuse; |
| 3534 | if (++scanned == MAX_PARTIAL_TO_SCAN / 2) |
| 3535 | break; |
| 3536 | } |
| 3537 | list_for_each_entry_reverse(slab, &n->partial, slab_list) { |
| 3538 | x += slab->objects - slab->inuse; |
| 3539 | if (++scanned == MAX_PARTIAL_TO_SCAN) |
| 3540 | break; |
| 3541 | } |
| 3542 | x = mult_frac(x, n->nr_partial, scanned); |
| 3543 | x = min(x, node_nr_objs(n)); |
| 3544 | } |
| 3545 | spin_unlock_irqrestore(&n->list_lock, flags); |
| 3546 | return x; |
| 3547 | } |
| 3548 | |
| 3549 | static noinline void |
| 3550 | slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) |
| 3551 | { |
| 3552 | static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, |
| 3553 | DEFAULT_RATELIMIT_BURST); |
| 3554 | int cpu = raw_smp_processor_id(); |
| 3555 | int node; |
| 3556 | struct kmem_cache_node *n; |
| 3557 | |
| 3558 | if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) |
| 3559 | return; |
| 3560 | |
| 3561 | pr_warn("SLUB: Unable to allocate memory on CPU %u (of node %d) on node %d, gfp=%#x(%pGg)\n", |
| 3562 | cpu, cpu_to_node(cpu), nid, gfpflags, &gfpflags); |
| 3563 | pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", |
| 3564 | s->name, s->object_size, s->size, oo_order(s->oo), |
| 3565 | oo_order(s->min)); |
| 3566 | |
| 3567 | if (oo_order(s->min) > get_order(s->object_size)) |
| 3568 | pr_warn(" %s debugging increased min order, use slab_debug=O to disable.\n", |
| 3569 | s->name); |
| 3570 | |
| 3571 | for_each_kmem_cache_node(s, node, n) { |
| 3572 | unsigned long nr_slabs; |
| 3573 | unsigned long nr_objs; |
| 3574 | unsigned long nr_free; |
| 3575 | |
| 3576 | nr_free = count_partial_free_approx(n); |
| 3577 | nr_slabs = node_nr_slabs(n); |
| 3578 | nr_objs = node_nr_objs(n); |
| 3579 | |
| 3580 | pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", |
| 3581 | node, nr_slabs, nr_objs, nr_free); |
| 3582 | } |
| 3583 | } |
| 3584 | #else /* CONFIG_SLUB_DEBUG */ |
| 3585 | static inline void |
| 3586 | slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { } |
| 3587 | #endif |
| 3588 | |
| 3589 | static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags) |
| 3590 | { |
| 3591 | if (unlikely(slab_test_pfmemalloc(slab))) |
| 3592 | return gfp_pfmemalloc_allowed(gfpflags); |
| 3593 | |
| 3594 | return true; |
| 3595 | } |
| 3596 | |
| 3597 | #ifndef CONFIG_SLUB_TINY |
| 3598 | static inline bool |
| 3599 | __update_cpu_freelist_fast(struct kmem_cache *s, |
| 3600 | void *freelist_old, void *freelist_new, |
| 3601 | unsigned long tid) |
| 3602 | { |
| 3603 | freelist_aba_t old = { .freelist = freelist_old, .counter = tid }; |
| 3604 | freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) }; |
| 3605 | |
| 3606 | return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full, |
| 3607 | &old.full, new.full); |
| 3608 | } |
| 3609 | |
| 3610 | /* |
| 3611 | * Check the slab->freelist and either transfer the freelist to the |
| 3612 | * per cpu freelist or deactivate the slab. |
| 3613 | * |
| 3614 | * The slab is still frozen if the return value is not NULL. |
| 3615 | * |
| 3616 | * If this function returns NULL then the slab has been unfrozen. |
| 3617 | */ |
| 3618 | static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) |
| 3619 | { |
| 3620 | struct slab new; |
| 3621 | unsigned long counters; |
| 3622 | void *freelist; |
| 3623 | |
| 3624 | lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); |
| 3625 | |
| 3626 | do { |
| 3627 | freelist = slab->freelist; |
| 3628 | counters = slab->counters; |
| 3629 | |
| 3630 | new.counters = counters; |
| 3631 | |
| 3632 | new.inuse = slab->objects; |
| 3633 | new.frozen = freelist != NULL; |
| 3634 | |
| 3635 | } while (!__slab_update_freelist(s, slab, |
| 3636 | freelist, counters, |
| 3637 | NULL, new.counters, |
| 3638 | "get_freelist")); |
| 3639 | |
| 3640 | return freelist; |
| 3641 | } |
| 3642 | |
| 3643 | /* |
| 3644 | * Freeze the partial slab and return the pointer to the freelist. |
| 3645 | */ |
| 3646 | static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab) |
| 3647 | { |
| 3648 | struct slab new; |
| 3649 | unsigned long counters; |
| 3650 | void *freelist; |
| 3651 | |
| 3652 | do { |
| 3653 | freelist = slab->freelist; |
| 3654 | counters = slab->counters; |
| 3655 | |
| 3656 | new.counters = counters; |
| 3657 | VM_BUG_ON(new.frozen); |
| 3658 | |
| 3659 | new.inuse = slab->objects; |
| 3660 | new.frozen = 1; |
| 3661 | |
| 3662 | } while (!slab_update_freelist(s, slab, |
| 3663 | freelist, counters, |
| 3664 | NULL, new.counters, |
| 3665 | "freeze_slab")); |
| 3666 | |
| 3667 | return freelist; |
| 3668 | } |
| 3669 | |
| 3670 | /* |
| 3671 | * Slow path. The lockless freelist is empty or we need to perform |
| 3672 | * debugging duties. |
| 3673 | * |
| 3674 | * Processing is still very fast if new objects have been freed to the |
| 3675 | * regular freelist. In that case we simply take over the regular freelist |
| 3676 | * as the lockless freelist and zap the regular freelist. |
| 3677 | * |
| 3678 | * If that is not working then we fall back to the partial lists. We take the |
| 3679 | * first element of the freelist as the object to allocate now and move the |
| 3680 | * rest of the freelist to the lockless freelist. |
| 3681 | * |
| 3682 | * And if we were unable to get a new slab from the partial slab lists then |
| 3683 | * we need to allocate a new slab. This is the slowest path since it involves |
| 3684 | * a call to the page allocator and the setup of a new slab. |
| 3685 | * |
| 3686 | * Version of __slab_alloc to use when we know that preemption is |
| 3687 | * already disabled (which is the case for bulk allocation). |
| 3688 | */ |
| 3689 | static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, |
| 3690 | unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) |
| 3691 | { |
| 3692 | void *freelist; |
| 3693 | struct slab *slab; |
| 3694 | unsigned long flags; |
| 3695 | struct partial_context pc; |
| 3696 | bool try_thisnode = true; |
| 3697 | |
| 3698 | stat(s, ALLOC_SLOWPATH); |
| 3699 | |
| 3700 | reread_slab: |
| 3701 | |
| 3702 | slab = READ_ONCE(c->slab); |
| 3703 | if (!slab) { |
| 3704 | /* |
| 3705 | * if the node is not online or has no normal memory, just |
| 3706 | * ignore the node constraint |
| 3707 | */ |
| 3708 | if (unlikely(node != NUMA_NO_NODE && |
| 3709 | !node_isset(node, slab_nodes))) |
| 3710 | node = NUMA_NO_NODE; |
| 3711 | goto new_slab; |
| 3712 | } |
| 3713 | |
| 3714 | if (unlikely(!node_match(slab, node))) { |
| 3715 | /* |
| 3716 | * same as above but node_match() being false already |
| 3717 | * implies node != NUMA_NO_NODE |
| 3718 | */ |
| 3719 | if (!node_isset(node, slab_nodes)) { |
| 3720 | node = NUMA_NO_NODE; |
| 3721 | } else { |
| 3722 | stat(s, ALLOC_NODE_MISMATCH); |
| 3723 | goto deactivate_slab; |
| 3724 | } |
| 3725 | } |
| 3726 | |
| 3727 | /* |
| 3728 | * By rights, we should be searching for a slab page that was |
| 3729 | * PFMEMALLOC but right now, we are losing the pfmemalloc |
| 3730 | * information when the page leaves the per-cpu allocator |
| 3731 | */ |
| 3732 | if (unlikely(!pfmemalloc_match(slab, gfpflags))) |
| 3733 | goto deactivate_slab; |
| 3734 | |
| 3735 | /* must check again c->slab in case we got preempted and it changed */ |
| 3736 | local_lock_irqsave(&s->cpu_slab->lock, flags); |
| 3737 | if (unlikely(slab != c->slab)) { |
| 3738 | local_unlock_irqrestore(&s->cpu_slab->lock, flags); |
| 3739 | goto reread_slab; |
| 3740 | } |
| 3741 | freelist = c->freelist; |
| 3742 | if (freelist) |
| 3743 | goto load_freelist; |
| 3744 | |
| 3745 | freelist = get_freelist(s, slab); |
| 3746 | |
| 3747 | if (!freelist) { |
| 3748 | c->slab = NULL; |
| 3749 | c->tid = next_tid(c->tid); |
| 3750 | local_unlock_irqrestore(&s->cpu_slab->lock, flags); |
| 3751 | stat(s, DEACTIVATE_BYPASS); |
| 3752 | goto new_slab; |
| 3753 | } |
| 3754 | |
| 3755 | stat(s, ALLOC_REFILL); |
| 3756 | |
| 3757 | load_freelist: |
| 3758 | |
| 3759 | lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); |
| 3760 | |
| 3761 | /* |
| 3762 | * freelist is pointing to the list of objects to be used. |
| 3763 | * slab is pointing to the slab from which the objects are obtained. |
| 3764 | * That slab must be frozen for per cpu allocations to work. |
| 3765 | */ |
| 3766 | VM_BUG_ON(!c->slab->frozen); |
| 3767 | c->freelist = get_freepointer(s, freelist); |
| 3768 | c->tid = next_tid(c->tid); |
| 3769 | local_unlock_irqrestore(&s->cpu_slab->lock, flags); |
| 3770 | return freelist; |
| 3771 | |
| 3772 | deactivate_slab: |
| 3773 | |
| 3774 | local_lock_irqsave(&s->cpu_slab->lock, flags); |
| 3775 | if (slab != c->slab) { |
| 3776 | local_unlock_irqrestore(&s->cpu_slab->lock, flags); |
| 3777 | goto reread_slab; |
| 3778 | } |
| 3779 | freelist = c->freelist; |
| 3780 | c->slab = NULL; |
| 3781 | c->freelist = NULL; |
| 3782 | c->tid = next_tid(c->tid); |
| 3783 | local_unlock_irqrestore(&s->cpu_slab->lock, flags); |
| 3784 | deactivate_slab(s, slab, freelist); |
| 3785 | |
| 3786 | new_slab: |
| 3787 | |
| 3788 | #ifdef CONFIG_SLUB_CPU_PARTIAL |
| 3789 | while (slub_percpu_partial(c)) { |
| 3790 | local_lock_irqsave(&s->cpu_slab->lock, flags); |
| 3791 | if (unlikely(c->slab)) { |
| 3792 | local_unlock_irqrestore(&s->cpu_slab->lock, flags); |
| 3793 | goto reread_slab; |
| 3794 | } |
| 3795 | if (unlikely(!slub_percpu_partial(c))) { |
| 3796 | local_unlock_irqrestore(&s->cpu_slab->lock, flags); |
| 3797 | /* we were preempted and partial list got empty */ |
| 3798 | goto new_objects; |
| 3799 | } |
| 3800 | |
| 3801 | slab = slub_percpu_partial(c); |
| 3802 | slub_set_percpu_partial(c, slab); |
| 3803 | |
| 3804 | if (likely(node_match(slab, node) && |
| 3805 | pfmemalloc_match(slab, gfpflags))) { |
| 3806 | c->slab = slab; |
| 3807 | freelist = get_freelist(s, slab); |
| 3808 | VM_BUG_ON(!freelist); |
| 3809 | stat(s, CPU_PARTIAL_ALLOC); |
| 3810 | goto load_freelist; |
| 3811 | } |
| 3812 | |
| 3813 | local_unlock_irqrestore(&s->cpu_slab->lock, flags); |
| 3814 | |
| 3815 | slab->next = NULL; |
| 3816 | __put_partials(s, slab); |
| 3817 | } |
| 3818 | #endif |
| 3819 | |
| 3820 | new_objects: |
| 3821 | |
| 3822 | pc.flags = gfpflags; |
| 3823 | /* |
| 3824 | * When a preferred node is indicated but __GFP_THISNODE is not set: |
| 3825 | * |
| 3826 | * 1) try to get a partial slab from the target node only by having |
| 3827 | * __GFP_THISNODE in pc.flags for get_partial() |
| 3828 | * 2) if 1) failed, try to allocate a new slab from the target node with |
| 3829 | * GFP_NOWAIT | __GFP_THISNODE opportunistically |
| 3830 | * 3) if 2) failed, retry with the original gfpflags which will allow |
| 3831 | * get_partial() to try partial lists of other nodes before potentially |
| 3832 | * allocating a new page from other nodes |
| 3833 | */ |
| 3834 | if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE) |
| 3835 | && try_thisnode)) |
| 3836 | pc.flags = GFP_NOWAIT | __GFP_THISNODE; |
| 3837 | |
| 3838 | pc.orig_size = orig_size; |
| 3839 | slab = get_partial(s, node, &pc); |
| 3840 | if (slab) { |
| 3841 | if (kmem_cache_debug(s)) { |
| 3842 | freelist = pc.object; |
| 3843 | /* |
| 3844 | * For debug caches here we had to go through |
| 3845 | * alloc_single_from_partial() so just store the |
| 3846 | * tracking info and return the object. |
| 3847 | */ |
| 3848 | if (s->flags & SLAB_STORE_USER) |
| 3849 | set_track(s, freelist, TRACK_ALLOC, addr); |
| 3850 | |
| 3851 | return freelist; |
| 3852 | } |
| 3853 | |
| 3854 | freelist = freeze_slab(s, slab); |
| 3855 | goto retry_load_slab; |
| 3856 | } |
| 3857 | |
| 3858 | slub_put_cpu_ptr(s->cpu_slab); |
| 3859 | slab = new_slab(s, pc.flags, node); |
| 3860 | c = slub_get_cpu_ptr(s->cpu_slab); |
| 3861 | |
| 3862 | if (unlikely(!slab)) { |
| 3863 | if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE) |
| 3864 | && try_thisnode) { |
| 3865 | try_thisnode = false; |
| 3866 | goto new_objects; |
| 3867 | } |
| 3868 | slab_out_of_memory(s, gfpflags, node); |
| 3869 | return NULL; |
| 3870 | } |
| 3871 | |
| 3872 | stat(s, ALLOC_SLAB); |
| 3873 | |
| 3874 | if (kmem_cache_debug(s)) { |
| 3875 | freelist = alloc_single_from_new_slab(s, slab, orig_size); |
| 3876 | |
| 3877 | if (unlikely(!freelist)) |
| 3878 | goto new_objects; |
| 3879 | |
| 3880 | if (s->flags & SLAB_STORE_USER) |
| 3881 | set_track(s, freelist, TRACK_ALLOC, addr); |
| 3882 | |
| 3883 | return freelist; |
| 3884 | } |
| 3885 | |
| 3886 | /* |
| 3887 | * No other reference to the slab yet so we can |
| 3888 | * muck around with it freely without cmpxchg |
| 3889 | */ |
| 3890 | freelist = slab->freelist; |
| 3891 | slab->freelist = NULL; |
| 3892 | slab->inuse = slab->objects; |
| 3893 | slab->frozen = 1; |
| 3894 | |
| 3895 | inc_slabs_node(s, slab_nid(slab), slab->objects); |
| 3896 | |
| 3897 | if (unlikely(!pfmemalloc_match(slab, gfpflags))) { |
| 3898 | /* |
| 3899 | * For !pfmemalloc_match() case we don't load freelist so that |
| 3900 | * we don't make further mismatched allocations easier. |
| 3901 | */ |
| 3902 | deactivate_slab(s, slab, get_freepointer(s, freelist)); |
| 3903 | return freelist; |
| 3904 | } |
| 3905 | |
| 3906 | retry_load_slab: |
| 3907 | |
| 3908 | local_lock_irqsave(&s->cpu_slab->lock, flags); |
| 3909 | if (unlikely(c->slab)) { |
| 3910 | void *flush_freelist = c->freelist; |
| 3911 | struct slab *flush_slab = c->slab; |
| 3912 | |
| 3913 | c->slab = NULL; |
| 3914 | c->freelist = NULL; |
| 3915 | c->tid = next_tid(c->tid); |
| 3916 | |
| 3917 | local_unlock_irqrestore(&s->cpu_slab->lock, flags); |
| 3918 | |
| 3919 | deactivate_slab(s, flush_slab, flush_freelist); |
| 3920 | |
| 3921 | stat(s, CPUSLAB_FLUSH); |
| 3922 | |
| 3923 | goto retry_load_slab; |
| 3924 | } |
| 3925 | c->slab = slab; |
| 3926 | |
| 3927 | goto load_freelist; |
| 3928 | } |
| 3929 | |
| 3930 | /* |
| 3931 | * A wrapper for ___slab_alloc() for contexts where preemption is not yet |
| 3932 | * disabled. Compensates for possible cpu changes by refetching the per cpu area |
| 3933 | * pointer. |
| 3934 | */ |
| 3935 | static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, |
| 3936 | unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) |
| 3937 | { |
| 3938 | void *p; |
| 3939 | |
| 3940 | #ifdef CONFIG_PREEMPT_COUNT |
| 3941 | /* |
| 3942 | * We may have been preempted and rescheduled on a different |
| 3943 | * cpu before disabling preemption. Need to reload cpu area |
| 3944 | * pointer. |
| 3945 | */ |
| 3946 | c = slub_get_cpu_ptr(s->cpu_slab); |
| 3947 | #endif |
| 3948 | |
| 3949 | p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size); |
| 3950 | #ifdef CONFIG_PREEMPT_COUNT |
| 3951 | slub_put_cpu_ptr(s->cpu_slab); |
| 3952 | #endif |
| 3953 | return p; |
| 3954 | } |
| 3955 | |
| 3956 | static __always_inline void *__slab_alloc_node(struct kmem_cache *s, |
| 3957 | gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) |
| 3958 | { |
| 3959 | struct kmem_cache_cpu *c; |
| 3960 | struct slab *slab; |
| 3961 | unsigned long tid; |
| 3962 | void *object; |
| 3963 | |
| 3964 | redo: |
| 3965 | /* |
| 3966 | * Must read kmem_cache cpu data via this cpu ptr. Preemption is |
| 3967 | * enabled. We may switch back and forth between cpus while |
| 3968 | * reading from one cpu area. That does not matter as long |
| 3969 | * as we end up on the original cpu again when doing the cmpxchg. |
| 3970 | * |
| 3971 | * We must guarantee that tid and kmem_cache_cpu are retrieved on the |
| 3972 | * same cpu. We read first the kmem_cache_cpu pointer and use it to read |
| 3973 | * the tid. If we are preempted and switched to another cpu between the |
| 3974 | * two reads, it's OK as the two are still associated with the same cpu |
| 3975 | * and cmpxchg later will validate the cpu. |
| 3976 | */ |
| 3977 | c = raw_cpu_ptr(s->cpu_slab); |
| 3978 | tid = READ_ONCE(c->tid); |
| 3979 | |
| 3980 | /* |
| 3981 | * Irqless object alloc/free algorithm used here depends on sequence |
| 3982 | * of fetching cpu_slab's data. tid should be fetched before anything |
| 3983 | * on c to guarantee that object and slab associated with previous tid |
| 3984 | * won't be used with current tid. If we fetch tid first, object and |
| 3985 | * slab could be one associated with next tid and our alloc/free |
| 3986 | * request will fail. In this case, we will retry. So, no problem. |
| 3987 | */ |
| 3988 | barrier(); |
| 3989 | |
| 3990 | /* |
| 3991 | * The transaction ids are globally unique per cpu and per operation on |
| 3992 | * a per cpu queue. Thus they guarantee that the cmpxchg_double |
| 3993 | * occurs on the right processor and that there was no operation on the |
| 3994 | * linked list in between. |
| 3995 | */ |
| 3996 | |
| 3997 | object = c->freelist; |
| 3998 | slab = c->slab; |
| 3999 | |
| 4000 | #ifdef CONFIG_NUMA |
| 4001 | if (static_branch_unlikely(&strict_numa) && |
| 4002 | node == NUMA_NO_NODE) { |
| 4003 | |
| 4004 | struct mempolicy *mpol = current->mempolicy; |
| 4005 | |
| 4006 | if (mpol) { |
| 4007 | /* |
| 4008 | * Special BIND rule support. If existing slab |
| 4009 | * is in permitted set then do not redirect |
| 4010 | * to a particular node. |
| 4011 | * Otherwise we apply the memory policy to get |
| 4012 | * the node we need to allocate on. |
| 4013 | */ |
| 4014 | if (mpol->mode != MPOL_BIND || !slab || |
| 4015 | !node_isset(slab_nid(slab), mpol->nodes)) |
| 4016 | |
| 4017 | node = mempolicy_slab_node(); |
| 4018 | } |
| 4019 | } |
| 4020 | #endif |
| 4021 | |
| 4022 | if (!USE_LOCKLESS_FAST_PATH() || |
| 4023 | unlikely(!object || !slab || !node_match(slab, node))) { |
| 4024 | object = __slab_alloc(s, gfpflags, node, addr, c, orig_size); |
| 4025 | } else { |
| 4026 | void *next_object = get_freepointer_safe(s, object); |
| 4027 | |
| 4028 | /* |
| 4029 | * The cmpxchg will only match if there was no additional |
| 4030 | * operation and if we are on the right processor. |
| 4031 | * |
| 4032 | * The cmpxchg does the following atomically (without lock |
| 4033 | * semantics!) |
| 4034 | * 1. Relocate first pointer to the current per cpu area. |
| 4035 | * 2. Verify that tid and freelist have not been changed |
| 4036 | * 3. If they were not changed replace tid and freelist |
| 4037 | * |
| 4038 | * Since this is without lock semantics the protection is only |
| 4039 | * against code executing on this cpu *not* from access by |
| 4040 | * other cpus. |
| 4041 | */ |
| 4042 | if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) { |
| 4043 | note_cmpxchg_failure("slab_alloc", s, tid); |
| 4044 | goto redo; |
| 4045 | } |
| 4046 | prefetch_freepointer(s, next_object); |
| 4047 | stat(s, ALLOC_FASTPATH); |
| 4048 | } |
| 4049 | |
| 4050 | return object; |
| 4051 | } |
| 4052 | #else /* CONFIG_SLUB_TINY */ |
| 4053 | static void *__slab_alloc_node(struct kmem_cache *s, |
| 4054 | gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) |
| 4055 | { |
| 4056 | struct partial_context pc; |
| 4057 | struct slab *slab; |
| 4058 | void *object; |
| 4059 | |
| 4060 | pc.flags = gfpflags; |
| 4061 | pc.orig_size = orig_size; |
| 4062 | slab = get_partial(s, node, &pc); |
| 4063 | |
| 4064 | if (slab) |
| 4065 | return pc.object; |
| 4066 | |
| 4067 | slab = new_slab(s, gfpflags, node); |
| 4068 | if (unlikely(!slab)) { |
| 4069 | slab_out_of_memory(s, gfpflags, node); |
| 4070 | return NULL; |
| 4071 | } |
| 4072 | |
| 4073 | object = alloc_single_from_new_slab(s, slab, orig_size); |
| 4074 | |
| 4075 | return object; |
| 4076 | } |
| 4077 | #endif /* CONFIG_SLUB_TINY */ |
| 4078 | |
| 4079 | /* |
| 4080 | * If the object has been wiped upon free, make sure it's fully initialized by |
| 4081 | * zeroing out the freelist pointer. |
| 4082 | * |
| 4083 | * Note that we also wipe custom freelist pointers. |
| 4084 | */ |
| 4085 | static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, |
| 4086 | void *obj) |
| 4087 | { |
| 4088 | if (unlikely(slab_want_init_on_free(s)) && obj && |
| 4089 | !freeptr_outside_object(s)) |
| 4090 | memset((void *)((char *)kasan_reset_tag(obj) + s->offset), |
| 4091 | 0, sizeof(void *)); |
| 4092 | } |
| 4093 | |
| 4094 | static __fastpath_inline |
| 4095 | struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) |
| 4096 | { |
| 4097 | flags &= gfp_allowed_mask; |
| 4098 | |
| 4099 | might_alloc(flags); |
| 4100 | |
| 4101 | if (unlikely(should_failslab(s, flags))) |
| 4102 | return NULL; |
| 4103 | |
| 4104 | return s; |
| 4105 | } |
| 4106 | |
| 4107 | static __fastpath_inline |
| 4108 | bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, |
| 4109 | gfp_t flags, size_t size, void **p, bool init, |
| 4110 | unsigned int orig_size) |
| 4111 | { |
| 4112 | unsigned int zero_size = s->object_size; |
| 4113 | bool kasan_init = init; |
| 4114 | size_t i; |
| 4115 | gfp_t init_flags = flags & gfp_allowed_mask; |
| 4116 | |
| 4117 | /* |
| 4118 | * For a kmalloc object, the allocated memory size (object_size) is likely |
| 4119 | * larger than the requested size (orig_size). If redzone check is |
| 4120 | * enabled for the extra space, don't zero it, as it will be redzoned |
| 4121 | * soon. The redzone operation for this extra space could be seen as a |
| 4122 | * replacement of current poisoning under certain debug option, and |
| 4123 | * won't break other sanity checks. |
| 4124 | */ |
| 4125 | if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) && |
| 4126 | (s->flags & SLAB_KMALLOC)) |
| 4127 | zero_size = orig_size; |
| 4128 | |
| 4129 | /* |
| 4130 | * When slab_debug is enabled, avoid memory initialization integrated |
| 4131 | * into KASAN and instead zero out the memory via the memset below with |
| 4132 | * the proper size. Otherwise, KASAN might overwrite SLUB redzones and |
| 4133 | * cause false-positive reports. This does not lead to a performance |
| 4134 | * penalty on production builds, as slab_debug is not intended to be |
| 4135 | * enabled there. |
| 4136 | */ |
| 4137 | if (__slub_debug_enabled()) |
| 4138 | kasan_init = false; |
| 4139 | |
| 4140 | /* |
| 4141 | * As memory initialization might be integrated into KASAN, |
| 4142 | * kasan_slab_alloc and initialization memset must be |
| 4143 | * kept together to avoid discrepancies in behavior. |
| 4144 | * |
| 4145 | * As p[i] might get tagged, memset and kmemleak hook come after KASAN. |
| 4146 | */ |
| 4147 | for (i = 0; i < size; i++) { |
| 4148 | p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init); |
| 4149 | if (p[i] && init && (!kasan_init || |
| 4150 | !kasan_has_integrated_init())) |
| 4151 | memset(p[i], 0, zero_size); |
| 4152 | kmemleak_alloc_recursive(p[i], s->object_size, 1, |
| 4153 | s->flags, init_flags); |
| 4154 | kmsan_slab_alloc(s, p[i], init_flags); |
| 4155 | alloc_tagging_slab_alloc_hook(s, p[i], flags); |
| 4156 | } |
| 4157 | |
| 4158 | return memcg_slab_post_alloc_hook(s, lru, flags, size, p); |
| 4159 | } |
| 4160 | |
| 4161 | /* |
| 4162 | * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) |
| 4163 | * have the fastpath folded into their functions. So no function call |
| 4164 | * overhead for requests that can be satisfied on the fastpath. |
| 4165 | * |
| 4166 | * The fastpath works by first checking if the lockless freelist can be used. |
| 4167 | * If not then __slab_alloc is called for slow processing. |
| 4168 | * |
| 4169 | * Otherwise we can simply pick the next object from the lockless free list. |
| 4170 | */ |
| 4171 | static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru, |
| 4172 | gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) |
| 4173 | { |
| 4174 | void *object; |
| 4175 | bool init = false; |
| 4176 | |
| 4177 | s = slab_pre_alloc_hook(s, gfpflags); |
| 4178 | if (unlikely(!s)) |
| 4179 | return NULL; |
| 4180 | |
| 4181 | object = kfence_alloc(s, orig_size, gfpflags); |
| 4182 | if (unlikely(object)) |
| 4183 | goto out; |
| 4184 | |
| 4185 | object = __slab_alloc_node(s, gfpflags, node, addr, orig_size); |
| 4186 | |
| 4187 | maybe_wipe_obj_freeptr(s, object); |
| 4188 | init = slab_want_init_on_alloc(gfpflags, s); |
| 4189 | |
| 4190 | out: |
| 4191 | /* |
| 4192 | * When init equals 'true', like for kzalloc() family, only |
| 4193 | * @orig_size bytes might be zeroed instead of s->object_size. |
| 4194 | * In case this fails due to memcg_slab_post_alloc_hook(), |
| 4195 | * object is set to NULL. |
| 4196 | */ |
| 4197 | slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size); |
| 4198 | |
| 4199 | return object; |
| 4200 | } |
| 4201 | |
| 4202 | void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags) |
| 4203 | { |
| 4204 | void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_, |
| 4205 | s->object_size); |
| 4206 | |
| 4207 | trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); |
| 4208 | |
| 4209 | return ret; |
| 4210 | } |
| 4211 | EXPORT_SYMBOL(kmem_cache_alloc_noprof); |
| 4212 | |
| 4213 | void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru, |
| 4214 | gfp_t gfpflags) |
| 4215 | { |
| 4216 | void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_, |
| 4217 | s->object_size); |
| 4218 | |
| 4219 | trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); |
| 4220 | |
| 4221 | return ret; |
| 4222 | } |
| 4223 | EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof); |
| 4224 | |
| 4225 | bool kmem_cache_charge(void *objp, gfp_t gfpflags) |
| 4226 | { |
| 4227 | if (!memcg_kmem_online()) |
| 4228 | return true; |
| 4229 | |
| 4230 | return memcg_slab_post_charge(objp, gfpflags); |
| 4231 | } |
| 4232 | EXPORT_SYMBOL(kmem_cache_charge); |
| 4233 | |
| 4234 | /** |
| 4235 | * kmem_cache_alloc_node - Allocate an object on the specified node |
| 4236 | * @s: The cache to allocate from. |
| 4237 | * @gfpflags: See kmalloc(). |
| 4238 | * @node: node number of the target node. |
| 4239 | * |
| 4240 | * Identical to kmem_cache_alloc but it will allocate memory on the given |
| 4241 | * node, which can improve the performance for cpu bound structures. |
| 4242 | * |
| 4243 | * Fallback to another node is possible if __GFP_THISNODE is not set. |
| 4244 | * |
| 4245 | * Return: pointer to the new object or %NULL in case of error |
| 4246 | */ |
| 4247 | void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node) |
| 4248 | { |
| 4249 | void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size); |
| 4250 | |
| 4251 | trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node); |
| 4252 | |
| 4253 | return ret; |
| 4254 | } |
| 4255 | EXPORT_SYMBOL(kmem_cache_alloc_node_noprof); |
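| | /* |
| | * Illustrative caller-side usage (hypothetical 'foo_cache' and 'nid'); |
| | * callers normally go through the kmem_cache_alloc_node() wrapper rather |
| | * than the _noprof variant directly: |
| | * |
| | *	f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid); |
| | *	if (!f) |
| | *		return -ENOMEM; |
| | */ |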
| 4256 | |
| 4257 | /* |
| 4258 | * To avoid unnecessary overhead, we pass through large allocation requests |
| 4259 | * directly to the page allocator. We use __GFP_COMP, because we will need to |
| 4260 | * know the allocation order to free the pages properly in kfree. |
| 4261 | */ |
| 4262 | static void *___kmalloc_large_node(size_t size, gfp_t flags, int node) |
| 4263 | { |
| 4264 | struct folio *folio; |
| 4265 | void *ptr = NULL; |
| 4266 | unsigned int order = get_order(size); |
| 4267 | |
| 4268 | if (unlikely(flags & GFP_SLAB_BUG_MASK)) |
| 4269 | flags = kmalloc_fix_flags(flags); |
| 4270 | |
| 4271 | flags |= __GFP_COMP; |
| 4272 | folio = (struct folio *)alloc_pages_node_noprof(node, flags, order); |
| 4273 | if (folio) { |
| 4274 | ptr = folio_address(folio); |
| 4275 | lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, |
| 4276 | PAGE_SIZE << order); |
| 4277 | __folio_set_large_kmalloc(folio); |
| 4278 | } |
| 4279 | |
| 4280 | ptr = kasan_kmalloc_large(ptr, size, flags); |
| 4281 | /* As ptr might get tagged, call kmemleak hook after KASAN. */ |
| 4282 | kmemleak_alloc(ptr, size, 1, flags); |
| 4283 | kmsan_kmalloc_large(ptr, size, flags); |
| 4284 | |
| 4285 | return ptr; |
| 4286 | } |
| 4287 | |
| 4288 | void *__kmalloc_large_noprof(size_t size, gfp_t flags) |
| 4289 | { |
| 4290 | void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE); |
| 4291 | |
| 4292 | trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), |
| 4293 | flags, NUMA_NO_NODE); |
| 4294 | return ret; |
| 4295 | } |
| 4296 | EXPORT_SYMBOL(__kmalloc_large_noprof); |
| 4297 | |
| 4298 | void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) |
| 4299 | { |
| 4300 | void *ret = ___kmalloc_large_node(size, flags, node); |
| 4301 | |
| 4302 | trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), |
| 4303 | flags, node); |
| 4304 | return ret; |
| 4305 | } |
| 4306 | EXPORT_SYMBOL(__kmalloc_large_node_noprof); |
| 4307 | |
| 4308 | static __always_inline |
| 4309 | void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node, |
| 4310 | unsigned long caller) |
| 4311 | { |
| 4312 | struct kmem_cache *s; |
| 4313 | void *ret; |
| 4314 | |
| 4315 | if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { |
| 4316 | ret = __kmalloc_large_node_noprof(size, flags, node); |
| 4317 | trace_kmalloc(caller, ret, size, |
| 4318 | PAGE_SIZE << get_order(size), flags, node); |
| 4319 | return ret; |
| 4320 | } |
| 4321 | |
| 4322 | if (unlikely(!size)) |
| 4323 | return ZERO_SIZE_PTR; |
| 4324 | |
| 4325 | s = kmalloc_slab(size, b, flags, caller); |
| 4326 | |
| 4327 | ret = slab_alloc_node(s, NULL, flags, node, caller, size); |
| 4328 | ret = kasan_kmalloc(s, ret, size, flags); |
| 4329 | trace_kmalloc(caller, ret, size, s->size, flags, node); |
| 4330 | return ret; |
| 4331 | } |
| 4332 | void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) |
| 4333 | { |
| 4334 | return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_); |
| 4335 | } |
| 4336 | EXPORT_SYMBOL(__kmalloc_node_noprof); |
| 4337 | |
| 4338 | void *__kmalloc_noprof(size_t size, gfp_t flags) |
| 4339 | { |
| 4340 | return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_); |
| 4341 | } |
| 4342 | EXPORT_SYMBOL(__kmalloc_noprof); |
| 4343 | |
| 4344 | void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, |
| 4345 | int node, unsigned long caller) |
| 4346 | { |
| 4347 | return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller); |
| 4348 | |
| 4349 | } |
| 4350 | EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof); |
| 4351 | |
| 4352 | void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size) |
| 4353 | { |
| 4354 | void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, |
| 4355 | _RET_IP_, size); |
| 4356 | |
| 4357 | trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE); |
| 4358 | |
| 4359 | ret = kasan_kmalloc(s, ret, size, gfpflags); |
| 4360 | return ret; |
| 4361 | } |
| 4362 | EXPORT_SYMBOL(__kmalloc_cache_noprof); |
| 4363 | |
| 4364 | void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags, |
| 4365 | int node, size_t size) |
| 4366 | { |
| 4367 | void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size); |
| 4368 | |
| 4369 | trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node); |
| 4370 | |
| 4371 | ret = kasan_kmalloc(s, ret, size, gfpflags); |
| 4372 | return ret; |
| 4373 | } |
| 4374 | EXPORT_SYMBOL(__kmalloc_cache_node_noprof); |
| 4375 | |
| 4376 | static noinline void free_to_partial_list( |
| 4377 | struct kmem_cache *s, struct slab *slab, |
| 4378 | void *head, void *tail, int bulk_cnt, |
| 4379 | unsigned long addr) |
| 4380 | { |
| 4381 | struct kmem_cache_node *n = get_node(s, slab_nid(slab)); |
| 4382 | struct slab *slab_free = NULL; |
| 4383 | int cnt = bulk_cnt; |
| 4384 | unsigned long flags; |
| 4385 | depot_stack_handle_t handle = 0; |
| 4386 | |
| 4387 | if (s->flags & SLAB_STORE_USER) |
| 4388 | handle = set_track_prepare(); |
| 4389 | |
| 4390 | spin_lock_irqsave(&n->list_lock, flags); |
| 4391 | |
| 4392 | if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) { |
| 4393 | void *prior = slab->freelist; |
| 4394 | |
| 4395 | /* Perform the actual freeing while we still hold the locks */ |
| 4396 | slab->inuse -= cnt; |
| 4397 | set_freepointer(s, tail, prior); |
| 4398 | slab->freelist = head; |
| 4399 | |
| 4400 | /* |
| 4401 | * If the slab is empty and the node's partial list is full, |
| 4402 | * it should be discarded anyway, no matter whether it's on the |
| 4403 | * full or partial list. |
| 4404 | */ |
| 4405 | if (slab->inuse == 0 && n->nr_partial >= s->min_partial) |
| 4406 | slab_free = slab; |
| 4407 | |
| 4408 | if (!prior) { |
| 4409 | /* was on full list */ |
| 4410 | remove_full(s, n, slab); |
| 4411 | if (!slab_free) { |
| 4412 | add_partial(n, slab, DEACTIVATE_TO_TAIL); |
| 4413 | stat(s, FREE_ADD_PARTIAL); |
| 4414 | } |
| 4415 | } else if (slab_free) { |
| 4416 | remove_partial(n, slab); |
| 4417 | stat(s, FREE_REMOVE_PARTIAL); |
| 4418 | } |
| 4419 | } |
| 4420 | |
| 4421 | if (slab_free) { |
| 4422 | /* |
| 4423 | * Update the counters while still holding n->list_lock to |
| 4424 | * prevent spurious validation warnings |
| 4425 | */ |
| 4426 | dec_slabs_node(s, slab_nid(slab_free), slab_free->objects); |
| 4427 | } |
| 4428 | |
| 4429 | spin_unlock_irqrestore(&n->list_lock, flags); |
| 4430 | |
| 4431 | if (slab_free) { |
| 4432 | stat(s, FREE_SLAB); |
| 4433 | free_slab(s, slab_free); |
| 4434 | } |
| 4435 | } |
| 4436 | |
| 4437 | /* |
| 4438 | * Slow path handling. This may still be called frequently since objects |
| 4439 | * have a longer lifetime than the cpu slabs in most processing loads. |
| 4440 | * |
| 4441 | * So we still attempt to reduce cache line usage. Just take the slab |
| 4442 | * lock and free the item. If there is no additional partial slab |
| 4443 | * handling required then we can return immediately. |
| 4444 | */ |
| 4445 | static void __slab_free(struct kmem_cache *s, struct slab *slab, |
| 4446 | void *head, void *tail, int cnt, |
| 4447 | unsigned long addr) |
| 4448 | |
| 4449 | { |
| 4450 | void *prior; |
| 4451 | int was_frozen; |
| 4452 | struct slab new; |
| 4453 | unsigned long counters; |
| 4454 | struct kmem_cache_node *n = NULL; |
| 4455 | unsigned long flags; |
| 4456 | bool on_node_partial; |
| 4457 | |
| 4458 | stat(s, FREE_SLOWPATH); |
| 4459 | |
| 4460 | if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { |
| 4461 | free_to_partial_list(s, slab, head, tail, cnt, addr); |
| 4462 | return; |
| 4463 | } |
| 4464 | |
| 4465 | do { |
| 4466 | if (unlikely(n)) { |
| 4467 | spin_unlock_irqrestore(&n->list_lock, flags); |
| 4468 | n = NULL; |
| 4469 | } |
| 4470 | prior = slab->freelist; |
| 4471 | counters = slab->counters; |
| 4472 | set_freepointer(s, tail, prior); |
| 4473 | new.counters = counters; |
| 4474 | was_frozen = new.frozen; |
| 4475 | new.inuse -= cnt; |
| 4476 | if ((!new.inuse || !prior) && !was_frozen) { |
| 4477 | /* Needs to be taken off a list */ |
| 4478 | if (!kmem_cache_has_cpu_partial(s) || prior) { |
| 4479 | |
| 4480 | n = get_node(s, slab_nid(slab)); |
| 4481 | /* |
| 4482 | * Speculatively acquire the list_lock. |
| 4483 | * If the cmpxchg does not succeed then we may |
| 4484 | * drop the list_lock without any processing. |
| 4485 | * |
| 4486 | * Otherwise the list_lock will synchronize with |
| 4487 | * other processors updating the list of slabs. |
| 4488 | */ |
| 4489 | spin_lock_irqsave(&n->list_lock, flags); |
| 4490 | |
| 4491 | on_node_partial = slab_test_node_partial(slab); |
| 4492 | } |
| 4493 | } |
| 4494 | |
| 4495 | } while (!slab_update_freelist(s, slab, |
| 4496 | prior, counters, |
| 4497 | head, new.counters, |
| 4498 | "__slab_free")); |
| 4499 | |
| 4500 | if (likely(!n)) { |
| 4501 | |
| 4502 | if (likely(was_frozen)) { |
| 4503 | /* |
| 4504 | * The list lock was not taken, therefore no list |
| 4505 | * activity can be necessary. |
| 4506 | */ |
| 4507 | stat(s, FREE_FROZEN); |
| 4508 | } else if (kmem_cache_has_cpu_partial(s) && !prior) { |
| 4509 | /* |
| 4510 | * If we started with a full slab then put it onto the |
| 4511 | * per cpu partial list. |
| 4512 | */ |
| 4513 | put_cpu_partial(s, slab, 1); |
| 4514 | stat(s, CPU_PARTIAL_FREE); |
| 4515 | } |
| 4516 | |
| 4517 | return; |
| 4518 | } |
| 4519 | |
| 4520 | /* |
| 4521 | * This slab was partially empty but not on the per-node partial list, |
| 4522 | * in which case we shouldn't manipulate its list, just return. |
| 4523 | */ |
| 4524 | if (prior && !on_node_partial) { |
| 4525 | spin_unlock_irqrestore(&n->list_lock, flags); |
| 4526 | return; |
| 4527 | } |
| 4528 | |
| 4529 | if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) |
| 4530 | goto slab_empty; |
| 4531 | |
| 4532 | /* |
| 4533 | * Objects left in the slab. If it was not on the partial list before |
| 4534 | * then add it. |
| 4535 | */ |
| 4536 | if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { |
| 4537 | add_partial(n, slab, DEACTIVATE_TO_TAIL); |
| 4538 | stat(s, FREE_ADD_PARTIAL); |
| 4539 | } |
| 4540 | spin_unlock_irqrestore(&n->list_lock, flags); |
| 4541 | return; |
| 4542 | |
| 4543 | slab_empty: |
| 4544 | if (prior) { |
| 4545 | /* |
| 4546 | * Slab on the partial list. |
| 4547 | */ |
| 4548 | remove_partial(n, slab); |
| 4549 | stat(s, FREE_REMOVE_PARTIAL); |
| 4550 | } |
| 4551 | |
| 4552 | spin_unlock_irqrestore(&n->list_lock, flags); |
| 4553 | stat(s, FREE_SLAB); |
| 4554 | discard_slab(s, slab); |
| 4555 | } |
| 4556 | |
| 4557 | #ifndef CONFIG_SLUB_TINY |
| 4558 | /* |
| 4559 | * Fastpath with forced inlining to produce a kfree and kmem_cache_free that |
| 4560 | * can perform fastpath freeing without additional function calls. |
| 4561 | * |
| 4562 | * The fastpath is only possible if we are freeing to the current cpu slab |
| 4563 | * of this processor. This is typically the case if we have just allocated |
| 4564 | * the item before. |
| 4565 | * |
| 4566 | * If fastpath is not possible then fall back to __slab_free where we deal |
| 4567 | * with all sorts of special processing. |
| 4568 | * |
| 4569 | * Bulk free of a freelist with several objects (all pointing to the |
| 4570 | * same slab) is possible by specifying head and tail pointers, plus an |
| 4571 | * object count (cnt). Bulk free is indicated by the tail pointer being set. |
| 4572 | */ |
| 4573 | static __always_inline void do_slab_free(struct kmem_cache *s, |
| 4574 | struct slab *slab, void *head, void *tail, |
| 4575 | int cnt, unsigned long addr) |
| 4576 | { |
| 4577 | struct kmem_cache_cpu *c; |
| 4578 | unsigned long tid; |
| 4579 | void **freelist; |
| 4580 | |
| 4581 | redo: |
| 4582 | /* |
| 4583 | * Determine the current cpu's per cpu slab. |
| 4584 | * The cpu may change afterward. However that does not matter since |
| 4585 | * data is retrieved via this pointer. If we are on the same cpu |
| 4586 | * during the cmpxchg then the free will succeed. |
| 4587 | */ |
| 4588 | c = raw_cpu_ptr(s->cpu_slab); |
| 4589 | tid = READ_ONCE(c->tid); |
| 4590 | |
| 4591 | /* Same with comment on barrier() in __slab_alloc_node() */ |
| 4592 | barrier(); |
| 4593 | |
| 4594 | if (unlikely(slab != c->slab)) { |
| 4595 | __slab_free(s, slab, head, tail, cnt, addr); |
| 4596 | return; |
| 4597 | } |
| 4598 | |
| 4599 | if (USE_LOCKLESS_FAST_PATH()) { |
| 4600 | freelist = READ_ONCE(c->freelist); |
| 4601 | |
| 4602 | set_freepointer(s, tail, freelist); |
| 4603 | |
| 4604 | if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) { |
| 4605 | note_cmpxchg_failure("slab_free", s, tid); |
| 4606 | goto redo; |
| 4607 | } |
| 4608 | } else { |
| 4609 | /* Update the free list under the local lock */ |
| 4610 | local_lock(&s->cpu_slab->lock); |
| 4611 | c = this_cpu_ptr(s->cpu_slab); |
| 4612 | if (unlikely(slab != c->slab)) { |
| 4613 | local_unlock(&s->cpu_slab->lock); |
| 4614 | goto redo; |
| 4615 | } |
| 4616 | tid = c->tid; |
| 4617 | freelist = c->freelist; |
| 4618 | |
| 4619 | set_freepointer(s, tail, freelist); |
| 4620 | c->freelist = head; |
| 4621 | c->tid = next_tid(tid); |
| 4622 | |
| 4623 | local_unlock(&s->cpu_slab->lock); |
| 4624 | } |
| 4625 | stat_add(s, FREE_FASTPATH, cnt); |
| 4626 | } |
| 4627 | #else /* CONFIG_SLUB_TINY */ |
| 4628 | static void do_slab_free(struct kmem_cache *s, |
| 4629 | struct slab *slab, void *head, void *tail, |
| 4630 | int cnt, unsigned long addr) |
| 4631 | { |
| 4632 | __slab_free(s, slab, head, tail, cnt, addr); |
| 4633 | } |
| 4634 | #endif /* CONFIG_SLUB_TINY */ |
| 4635 | |
| 4636 | static __fastpath_inline |
| 4637 | void slab_free(struct kmem_cache *s, struct slab *slab, void *object, |
| 4638 | unsigned long addr) |
| 4639 | { |
| 4640 | memcg_slab_free_hook(s, slab, &object, 1); |
| 4641 | alloc_tagging_slab_free_hook(s, slab, &object, 1); |
| 4642 | |
| 4643 | if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false))) |
| 4644 | do_slab_free(s, slab, object, object, 1, addr); |
| 4645 | } |
| 4646 | |
| 4647 | #ifdef CONFIG_MEMCG |
| 4648 | /* Do not inline the rare memcg charging failed path into the allocation path */ |
| 4649 | static noinline |
| 4650 | void memcg_alloc_abort_single(struct kmem_cache *s, void *object) |
| 4651 | { |
| 4652 | if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false))) |
| 4653 | do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_); |
| 4654 | } |
| 4655 | #endif |
| 4656 | |
| 4657 | static __fastpath_inline |
| 4658 | void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head, |
| 4659 | void *tail, void **p, int cnt, unsigned long addr) |
| 4660 | { |
| 4661 | memcg_slab_free_hook(s, slab, p, cnt); |
| 4662 | alloc_tagging_slab_free_hook(s, slab, p, cnt); |
| 4663 | /* |
| 4664 | * With KASAN enabled, slab_free_freelist_hook modifies the freelist |
| 4665 | * to remove objects whose reuse must be delayed. |
| 4666 | */ |
| 4667 | if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt))) |
| 4668 | do_slab_free(s, slab, head, tail, cnt, addr); |
| 4669 | } |
| 4670 | |
| 4671 | #ifdef CONFIG_SLUB_RCU_DEBUG |
| 4672 | static void slab_free_after_rcu_debug(struct rcu_head *rcu_head) |
| 4673 | { |
| 4674 | struct rcu_delayed_free *delayed_free = |
| 4675 | container_of(rcu_head, struct rcu_delayed_free, head); |
| 4676 | void *object = delayed_free->object; |
| 4677 | struct slab *slab = virt_to_slab(object); |
| 4678 | struct kmem_cache *s; |
| 4679 | |
| 4680 | kfree(delayed_free); |
| 4681 | |
| 4682 | if (WARN_ON(is_kfence_address(object))) |
| 4683 | return; |
| 4684 | |
| 4685 | /* find the object and the cache again */ |
| 4686 | if (WARN_ON(!slab)) |
| 4687 | return; |
| 4688 | s = slab->slab_cache; |
| 4689 | if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU))) |
| 4690 | return; |
| 4691 | |
| 4692 | /* resume freeing */ |
| 4693 | if (slab_free_hook(s, object, slab_want_init_on_free(s), true)) |
| 4694 | do_slab_free(s, slab, object, object, 1, _THIS_IP_); |
| 4695 | } |
| 4696 | #endif /* CONFIG_SLUB_RCU_DEBUG */ |
| 4697 | |
| 4698 | #ifdef CONFIG_KASAN_GENERIC |
| 4699 | void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr) |
| 4700 | { |
| 4701 | do_slab_free(cache, virt_to_slab(x), x, x, 1, addr); |
| 4702 | } |
| 4703 | #endif |
| 4704 | |
| 4705 | static inline struct kmem_cache *virt_to_cache(const void *obj) |
| 4706 | { |
| 4707 | struct slab *slab; |
| 4708 | |
| 4709 | slab = virt_to_slab(obj); |
| 4710 | if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__)) |
| 4711 | return NULL; |
| 4712 | return slab->slab_cache; |
| 4713 | } |
| 4714 | |
| 4715 | static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) |
| 4716 | { |
| 4717 | struct kmem_cache *cachep; |
| 4718 | |
| 4719 | if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) && |
| 4720 | !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) |
| 4721 | return s; |
| 4722 | |
| 4723 | cachep = virt_to_cache(x); |
| 4724 | if (WARN(cachep && cachep != s, |
| 4725 | "%s: Wrong slab cache. %s but object is from %s\n", |
| 4726 | __func__, s->name, cachep->name)) |
| 4727 | print_tracking(cachep, x); |
| 4728 | return cachep; |
| 4729 | } |
| 4730 | |
| 4731 | /** |
| 4732 | * kmem_cache_free - Deallocate an object |
| 4733 | * @s: The cache the allocation was from. |
| 4734 | * @x: The previously allocated object. |
| 4735 | * |
| 4736 | * Free an object which was previously allocated from this |
| 4737 | * cache. |
| 4738 | */ |
| 4739 | void kmem_cache_free(struct kmem_cache *s, void *x) |
| 4740 | { |
| 4741 | s = cache_from_obj(s, x); |
| 4742 | if (!s) |
| 4743 | return; |
| 4744 | trace_kmem_cache_free(_RET_IP_, x, s); |
| 4745 | slab_free(s, virt_to_slab(x), x, _RET_IP_); |
| 4746 | } |
| 4747 | EXPORT_SYMBOL(kmem_cache_free); |
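| | /* |
| | * Illustrative alloc/free pairing (hypothetical 'foo_cache'): objects must |
| | * be returned to the same cache they were allocated from: |
| | * |
| | *	f = kmem_cache_alloc(foo_cache, GFP_KERNEL); |
| | *	... |
| | *	kmem_cache_free(foo_cache, f); |
| | */ |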
| 4748 | |
| 4749 | static void free_large_kmalloc(struct folio *folio, void *object) |
| 4750 | { |
| 4751 | unsigned int order = folio_order(folio); |
| 4752 | |
| 4753 | if (WARN_ON_ONCE(!folio_test_large_kmalloc(folio))) { |
| 4754 | dump_page(&folio->page, "Not a kmalloc allocation"); |
| 4755 | return; |
| 4756 | } |
| 4757 | |
| 4758 | if (WARN_ON_ONCE(order == 0)) |
| 4759 | pr_warn_once("object pointer: 0x%p\n", object); |
| 4760 | |
| 4761 | kmemleak_free(object); |
| 4762 | kasan_kfree_large(object); |
| 4763 | kmsan_kfree_large(object); |
| 4764 | |
| 4765 | lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, |
| 4766 | -(PAGE_SIZE << order)); |
| 4767 | __folio_clear_large_kmalloc(folio); |
| 4768 | folio_put(folio); |
| 4769 | } |
| 4770 | |
| 4771 | /* |
| 4772 | * Given an rcu_head embedded within an object obtained from kvmalloc at an |
| 4773 | * offset < 4k, free the object in question. |
| 4774 | */ |
| 4775 | void kvfree_rcu_cb(struct rcu_head *head) |
| 4776 | { |
| 4777 | void *obj = head; |
| 4778 | struct folio *folio; |
| 4779 | struct slab *slab; |
| 4780 | struct kmem_cache *s; |
| 4781 | void *slab_addr; |
| 4782 | |
| 4783 | if (is_vmalloc_addr(obj)) { |
| 4784 | obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj); |
| 4785 | vfree(obj); |
| 4786 | return; |
| 4787 | } |
| 4788 | |
| 4789 | folio = virt_to_folio(obj); |
| 4790 | if (!folio_test_slab(folio)) { |
| 4791 | /* |
| 4792 | * The rcu_head offset is guaranteed to be less than the page size, so |
| 4793 | * there is no need to consider the folio order |
| 4794 | */ |
| 4795 | obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj); |
| 4796 | free_large_kmalloc(folio, obj); |
| 4797 | return; |
| 4798 | } |
| 4799 | |
| 4800 | slab = folio_slab(folio); |
| 4801 | s = slab->slab_cache; |
| 4802 | slab_addr = folio_address(folio); |
| 4803 | |
| 4804 | if (is_kfence_address(obj)) { |
| 4805 | obj = kfence_object_start(obj); |
| 4806 | } else { |
| 4807 | unsigned int idx = __obj_to_index(s, slab_addr, obj); |
| 4808 | |
| 4809 | obj = slab_addr + s->size * idx; |
| 4810 | obj = fixup_red_left(s, obj); |
| 4811 | } |
| 4812 | |
| 4813 | slab_free(s, slab, obj, _RET_IP_); |
| 4814 | } |
| 4815 | |
| 4816 | /** |
| 4817 | * kfree - free previously allocated memory |
| 4818 | * @object: pointer returned by kmalloc() or kmem_cache_alloc() |
| 4819 | * |
| 4820 | * If @object is NULL, no operation is performed. |
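| | * |
| | * Illustrative sketch (the buffer name and size are hypothetical): |
| | * |
| | *	buf = kmalloc(64, GFP_KERNEL); |
| | *	if (buf) { |
| | *		... |
| | *		kfree(buf); |
| | *	} |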
| 4821 | */ |
| 4822 | void kfree(const void *object) |
| 4823 | { |
| 4824 | struct folio *folio; |
| 4825 | struct slab *slab; |
| 4826 | struct kmem_cache *s; |
| 4827 | void *x = (void *)object; |
| 4828 | |
| 4829 | trace_kfree(_RET_IP_, object); |
| 4830 | |
| 4831 | if (unlikely(ZERO_OR_NULL_PTR(object))) |
| 4832 | return; |
| 4833 | |
| 4834 | folio = virt_to_folio(object); |
| 4835 | if (unlikely(!folio_test_slab(folio))) { |
| 4836 | free_large_kmalloc(folio, (void *)object); |
| 4837 | return; |
| 4838 | } |
| 4839 | |
| 4840 | slab = folio_slab(folio); |
| 4841 | s = slab->slab_cache; |
| 4842 | slab_free(s, slab, x, _RET_IP_); |
| 4843 | } |
| 4844 | EXPORT_SYMBOL(kfree); |
| 4845 | |
| 4846 | static __always_inline __realloc_size(2) void * |
| 4847 | __do_krealloc(const void *p, size_t new_size, gfp_t flags) |
| 4848 | { |
| 4849 | void *ret; |
| 4850 | size_t ks = 0; |
| 4851 | int orig_size = 0; |
| 4852 | struct kmem_cache *s = NULL; |
| 4853 | |
| 4854 | if (unlikely(ZERO_OR_NULL_PTR(p))) |
| 4855 | goto alloc_new; |
| 4856 | |
| 4857 | /* Check for double-free. */ |
| 4858 | if (!kasan_check_byte(p)) |
| 4859 | return NULL; |
| 4860 | |
| 4861 | if (is_kfence_address(p)) { |
| 4862 | ks = orig_size = kfence_ksize(p); |
| 4863 | } else { |
| 4864 | struct folio *folio; |
| 4865 | |
| 4866 | folio = virt_to_folio(p); |
| 4867 | if (unlikely(!folio_test_slab(folio))) { |
| 4868 | /* Big kmalloc object */ |
| 4869 | WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE); |
| 4870 | WARN_ON(p != folio_address(folio)); |
| 4871 | ks = folio_size(folio); |
| 4872 | } else { |
| 4873 | s = folio_slab(folio)->slab_cache; |
| 4874 | orig_size = get_orig_size(s, (void *)p); |
| 4875 | ks = s->object_size; |
| 4876 | } |
| 4877 | } |
| 4878 | |
| 4879 | /* If the old object doesn't fit, allocate a bigger one */ |
| 4880 | if (new_size > ks) |
| 4881 | goto alloc_new; |
| 4882 | |
| 4883 | /* Zero out spare memory. */ |
| 4884 | if (want_init_on_alloc(flags)) { |
| 4885 | kasan_disable_current(); |
| 4886 | if (orig_size && orig_size < new_size) |
| 4887 | memset(kasan_reset_tag(p) + orig_size, 0, new_size - orig_size); |
| 4888 | else |
| 4889 | memset(kasan_reset_tag(p) + new_size, 0, ks - new_size); |
| 4890 | kasan_enable_current(); |
| 4891 | } |
| 4892 | |
| 4893 | /* Setup kmalloc redzone when needed */ |
| 4894 | if (s && slub_debug_orig_size(s)) { |
| 4895 | set_orig_size(s, (void *)p, new_size); |
| 4896 | if (s->flags & SLAB_RED_ZONE && new_size < ks) |
| 4897 | memset_no_sanitize_memory(kasan_reset_tag(p) + new_size, |
| 4898 | SLUB_RED_ACTIVE, ks - new_size); |
| 4899 | } |
| 4900 | |
| 4901 | p = kasan_krealloc(p, new_size, flags); |
| 4902 | return (void *)p; |
| 4903 | |
| 4904 | alloc_new: |
| 4905 | ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_); |
| 4906 | if (ret && p) { |
| 4907 | /* Disable KASAN checks as the object's redzone is accessed. */ |
| 4908 | kasan_disable_current(); |
| 4909 | memcpy(ret, kasan_reset_tag(p), orig_size ?: ks); |
| 4910 | kasan_enable_current(); |
| 4911 | } |
| 4912 | |
| 4913 | return ret; |
| 4914 | } |
| 4915 | |
| 4916 | /** |
| 4917 | * krealloc - reallocate memory. The contents will remain unchanged. |
| 4918 | * @p: object to reallocate memory for. |
| 4919 | * @new_size: how many bytes of memory are required. |
| 4920 | * @flags: the type of memory to allocate. |
| 4921 | * |
| 4922 | * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size |
| 4923 | * is 0 and @p is not a %NULL pointer, the object pointed to is freed. |
| 4924 | * |
| 4925 | * If __GFP_ZERO logic is requested, callers must ensure that, starting with the |
| 4926 | * initial memory allocation, every subsequent call to this API for the same |
| 4927 | * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that |
| 4928 | * __GFP_ZERO is not fully honored by this API. |
| 4929 | * |
| 4930 | * When slub_debug_orig_size() is off, krealloc() only knows about the bucket |
| 4931 | * size of an allocation (but not the exact size it was allocated with) and |
| 4932 | * hence implements the following semantics for shrinking and growing buffers |
| 4933 | * with __GFP_ZERO. |
| 4934 | * |
| 4935 | * new bucket |
| 4936 | * 0 size size |
| 4937 | * |--------|----------------| |
| 4938 | * | keep | zero | |
| 4939 | * |
| 4940 | * Otherwise, the original allocation size 'orig_size' could be used to |
| 4941 | * precisely clear the requested size, and the new size will also be stored |
| 4942 | * as the new 'orig_size'. |
| 4943 | * |
| 4944 | * In any case, the contents of the object pointed to are preserved up to the |
| 4945 | * lesser of the new and old sizes. |
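| | * |
| | * Illustrative sketch of growing a zeroed buffer (all names and sizes are |
| | * hypothetical); on failure the original allocation is left untouched: |
| | * |
| | *	buf = kzalloc(32, GFP_KERNEL); |
| | *	new = krealloc(buf, 64, GFP_KERNEL | __GFP_ZERO); |
| | *	if (!new) |
| | *		kfree(buf); |
| | *	else |
| | *		buf = new; |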
| 4946 | * |
| 4947 | * Return: pointer to the allocated memory or %NULL in case of error |
| 4948 | */ |
| 4949 | void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags) |
| 4950 | { |
| 4951 | void *ret; |
| 4952 | |
| 4953 | if (unlikely(!new_size)) { |
| 4954 | kfree(p); |
| 4955 | return ZERO_SIZE_PTR; |
| 4956 | } |
| 4957 | |
| 4958 | ret = __do_krealloc(p, new_size, flags); |
| 4959 | if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret)) |
| 4960 | kfree(p); |
| 4961 | |
| 4962 | return ret; |
| 4963 | } |
| 4964 | EXPORT_SYMBOL(krealloc_noprof); |
| 4965 | |
| 4966 | static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size) |
| 4967 | { |
| 4968 | /* |
| 4969 | * We want to attempt a large physically contiguous block first because |
| 4970 | * it is less likely to fragment multiple larger blocks and therefore |
| 4971 | * contributes less to long-term fragmentation than the vmalloc fallback. |
| 4972 | * However, make sure that larger requests are not too disruptive - i.e. |
| 4973 | * do not direct reclaim unless physically contiguous memory is preferred |
| 4974 | * (__GFP_RETRY_MAYFAIL mode). We still kick in kswapd/kcompactd to |
| 4975 | * start working in the background. |
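| | * |
| | * For example, a plain GFP_KERNEL request larger than PAGE_SIZE is attempted |
| | * here with __GFP_NOWARN set and __GFP_DIRECT_RECLAIM cleared, leaving any |
| | * direct reclaim effort to the vmalloc fallback. |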
| 4976 | */ |
| 4977 | if (size > PAGE_SIZE) { |
| 4978 | flags |= __GFP_NOWARN; |
| 4979 | |
| 4980 | if (!(flags & __GFP_RETRY_MAYFAIL)) |
| 4981 | flags &= ~__GFP_DIRECT_RECLAIM; |
| 4982 | |
| 4983 | /* nofail semantic is implemented by the vmalloc fallback */ |
| 4984 | flags &= ~__GFP_NOFAIL; |
| 4985 | } |
| 4986 | |
| 4987 | return flags; |
| 4988 | } |
| 4989 | |
| 4990 | /** |
| 4991 | * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon |
| 4992 | * failure, fall back to non-contiguous (vmalloc) allocation. |
| 4993 | * @size: size of the request. |
| 4994 | * @b: which set of kmalloc buckets to allocate from. |
| 4995 | * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL. |
| 4996 | * @node: numa node to allocate from |
| 4997 | * |
| 4998 | * Uses kmalloc to get the memory but if the allocation fails then falls back |
| 4999 | * to the vmalloc allocator. Use kvfree for freeing the memory. |
| 5000 | * |
| 5001 | * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier. |
| 5002 | * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is |
| 5003 | * preferable to the vmalloc fallback, due to visible performance drawbacks. |
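| | * |
| | * Illustrative sketch (the table pointer and element count are hypothetical): |
| | * |
| | *	table = kvmalloc(nr_entries * sizeof(*table), GFP_KERNEL); |
| | *	if (!table) |
| | *		return -ENOMEM; |
| | *	... |
| | *	kvfree(table); |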
| 5004 | * |
| 5005 | * Return: pointer to the allocated memory or %NULL in case of failure |
| 5006 | */ |
| 5007 | void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) |
| 5008 | { |
| 5009 | void *ret; |
| 5010 | |
| 5011 | /* |
| 5012 | * It doesn't really make sense to fall back to vmalloc for sub-page |
| 5013 | * requests. |
| 5014 | */ |
| 5015 | ret = __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), |
| 5016 | kmalloc_gfp_adjust(flags, size), |
| 5017 | node, _RET_IP_); |
| 5018 | if (ret || size <= PAGE_SIZE) |
| 5019 | return ret; |
| 5020 | |
| 5021 | /* non-sleeping allocations are not supported by vmalloc */ |
| 5022 | if (!gfpflags_allow_blocking(flags)) |
| 5023 | return NULL; |
| 5024 | |
| 5025 | /* Don't even allow crazy sizes */ |
| 5026 | if (unlikely(size > INT_MAX)) { |
| 5027 | WARN_ON_ONCE(!(flags & __GFP_NOWARN)); |
| 5028 | return NULL; |
| 5029 | } |
| 5030 | |
| 5031 | /* |
| 5032 | * kvmalloc() can always use VM_ALLOW_HUGE_VMAP, |
| 5033 | * since the callers already cannot assume anything |
| 5034 | * about the resulting pointer, and cannot play |
| 5035 | * protection games. |
| 5036 | */ |
| 5037 | return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END, |
| 5038 | flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, |
| 5039 | node, __builtin_return_address(0)); |
| 5040 | } |
| 5041 | EXPORT_SYMBOL(__kvmalloc_node_noprof); |
| 5042 | |
| 5043 | /** |
| 5044 | * kvfree() - Free memory. |
| 5045 | * @addr: Pointer to allocated memory. |
| 5046 | * |
| 5047 | * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc(). |
| 5048 | * It is slightly more efficient to use kfree() or vfree() if you are certain |
| 5049 | * that you know which one to use. |
| 5050 | * |
| 5051 | * Context: Either preemptible task context or not-NMI interrupt. |
| 5052 | */ |
| 5053 | void kvfree(const void *addr) |
| 5054 | { |
| 5055 | if (is_vmalloc_addr(addr)) |
| 5056 | vfree(addr); |
| 5057 | else |
| 5058 | kfree(addr); |
| 5059 | } |
| 5060 | EXPORT_SYMBOL(kvfree); |
| 5061 | |
| 5062 | /** |
| 5063 | * kvfree_sensitive - Free a data object containing sensitive information. |
| 5064 | * @addr: address of the data object to be freed. |
| 5065 | * @len: length of the data object. |
| 5066 | * |
| 5067 | * Use the special memzero_explicit() function to clear the content of a |
| 5068 | * kvmalloc'ed object containing sensitive data to make sure that the |
| 5069 | * compiler won't optimize out the data clearing. |
| 5070 | */ |
| 5071 | void kvfree_sensitive(const void *addr, size_t len) |
| 5072 | { |
| 5073 | if (likely(!ZERO_OR_NULL_PTR(addr))) { |
| 5074 | memzero_explicit((void *)addr, len); |
| 5075 | kvfree(addr); |
| 5076 | } |
| 5077 | } |
| 5078 | EXPORT_SYMBOL(kvfree_sensitive); |
| 5079 | |
| 5080 | /** |
| 5081 | * kvrealloc - reallocate memory; contents remain unchanged |
| 5082 | * @p: object to reallocate memory for |
| 5083 | * @size: the size to reallocate |
| 5084 | * @flags: the flags for the page level allocator |
| 5085 | * |
| 5086 | * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0 |
| 5087 | * and @p is not a %NULL pointer, the object pointed to is freed. |
| 5088 | * |
| 5089 | * If __GFP_ZERO logic is requested, callers must ensure that, starting with the |
| 5090 | * initial memory allocation, every subsequent call to this API for the same |
| 5091 | * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that |
| 5092 | * __GFP_ZERO is not fully honored by this API. |
| 5093 | * |
| 5094 | * In any case, the contents of the object pointed to are preserved up to the |
| 5095 | * lesser of the new and old sizes. |
| 5096 | * |
| 5097 | * This function must not be called concurrently with itself or kvfree() for the |
| 5098 | * same memory allocation. |
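| | * |
| | * Illustrative sketch (names are hypothetical); as with krealloc(), the old |
| | * allocation is only freed once the new one has been set up: |
| | * |
| | *	new = kvrealloc(buf, new_size, GFP_KERNEL); |
| | *	if (new) |
| | *		buf = new; |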
| 5099 | * |
| 5100 | * Return: pointer to the allocated memory or %NULL in case of error |
| 5101 | */ |
| 5102 | void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags) |
| 5103 | { |
| 5104 | void *n; |
| 5105 | |
| 5106 | if (is_vmalloc_addr(p)) |
| 5107 | return vrealloc_noprof(p, size, flags); |
| 5108 | |
| 5109 | n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size)); |
| 5110 | if (!n) { |
| 5111 | /* We failed to krealloc(), fall back to kvmalloc(). */ |
| 5112 | n = kvmalloc_noprof(size, flags); |
| 5113 | if (!n) |
| 5114 | return NULL; |
| 5115 | |
| 5116 | if (p) { |
| 5117 | /* We already know that `p` is not a vmalloc address. */ |
| 5118 | kasan_disable_current(); |
| 5119 | memcpy(n, kasan_reset_tag(p), ksize(p)); |
| 5120 | kasan_enable_current(); |
| 5121 | |
| 5122 | kfree(p); |
| 5123 | } |
| 5124 | } |
| 5125 | |
| 5126 | return n; |
| 5127 | } |
| 5128 | EXPORT_SYMBOL(kvrealloc_noprof); |
| 5129 | |
| 5130 | struct detached_freelist { |
| 5131 | struct slab *slab; |
| 5132 | void *tail; |
| 5133 | void *freelist; |
| 5134 | int cnt; |
| 5135 | struct kmem_cache *s; |
| 5136 | }; |
| 5137 | |
| 5138 | /* |
| 5139 | * This function progressively scans the array with free objects (with |
| 5140 | * a limited look ahead) and extracts objects belonging to the same |
| 5141 | * slab. It builds a detached freelist directly within the given |
| 5142 | * slab/objects. This can happen without any need for |
| 5143 | * synchronization, because the objects are owned by the running process. |
| 5144 | * The freelist is built up as a single linked list in the objects. |
| 5145 | * The idea is that this detached freelist can then be bulk |
| 5146 | * transferred to the real freelist(s), while only requiring a single |
| 5147 | * synchronization primitive. Look ahead in the array is limited for |
| 5148 | * performance reasons. |
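| | * |
| | * Illustrative example (hypothetical objects): freeing p[] = {A1, B1, A2, A3}, |
| | * where the A* objects share one slab and B1 sits in a different one, the |
| | * first call links A1->A2->A3 into a detached freelist (df->cnt == 3, with A3 |
| | * as the tail), compacts B1 towards the front of the array and returns 1, so |
| | * the caller's next call deals with B1 on its own. |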
| 5149 | */ |
| 5150 | static inline |
| 5151 | int build_detached_freelist(struct kmem_cache *s, size_t size, |
| 5152 | void **p, struct detached_freelist *df) |
| 5153 | { |
| 5154 | int lookahead = 3; |
| 5155 | void *object; |
| 5156 | struct folio *folio; |
| 5157 | size_t same; |
| 5158 | |
| 5159 | object = p[--size]; |
| 5160 | folio = virt_to_folio(object); |
| 5161 | if (!s) { |
| 5162 | /* Handle kmalloc'ed objects */ |
| 5163 | if (unlikely(!folio_test_slab(folio))) { |
| 5164 | free_large_kmalloc(folio, object); |
| 5165 | df->slab = NULL; |
| 5166 | return size; |
| 5167 | } |
| 5168 | /* Derive kmem_cache from object */ |
| 5169 | df->slab = folio_slab(folio); |
| 5170 | df->s = df->slab->slab_cache; |
| 5171 | } else { |
| 5172 | df->slab = folio_slab(folio); |
| 5173 | df->s = cache_from_obj(s, object); /* Support for memcg */ |
| 5174 | } |
| 5175 | |
| 5176 | /* Start new detached freelist */ |
| 5177 | df->tail = object; |
| 5178 | df->freelist = object; |
| 5179 | df->cnt = 1; |
| 5180 | |
| 5181 | if (is_kfence_address(object)) |
| 5182 | return size; |
| 5183 | |
| 5184 | set_freepointer(df->s, object, NULL); |
| 5185 | |
| 5186 | same = size; |
| 5187 | while (size) { |
| 5188 | object = p[--size]; |
| 5189 | /* df->slab is always set at this point */ |
| 5190 | if (df->slab == virt_to_slab(object)) { |
| 5191 | /* Opportunistically build the freelist */ |
| 5192 | set_freepointer(df->s, object, df->freelist); |
| 5193 | df->freelist = object; |
| 5194 | df->cnt++; |
| 5195 | same--; |
| 5196 | if (size != same) |
| 5197 | swap(p[size], p[same]); |
| 5198 | continue; |
| 5199 | } |
| 5200 | |
| 5201 | /* Limit look ahead search */ |
| 5202 | if (!--lookahead) |
| 5203 | break; |
| 5204 | } |
| 5205 | |
| 5206 | return same; |
| 5207 | } |
| 5208 | |
| 5209 | /* |
| 5210 | * Internal bulk free of objects that were not initialised by the post alloc |
| 5211 | * hooks and thus should not be processed by the free hooks |
| 5212 | */ |
| 5213 | static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) |
| 5214 | { |
| 5215 | if (!size) |
| 5216 | return; |
| 5217 | |
| 5218 | do { |
| 5219 | struct detached_freelist df; |
| 5220 | |
| 5221 | size = build_detached_freelist(s, size, p, &df); |
| 5222 | if (!df.slab) |
| 5223 | continue; |
| 5224 | |
| 5225 | if (kfence_free(df.freelist)) |
| 5226 | continue; |
| 5227 | |
| 5228 | do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, |
| 5229 | _RET_IP_); |
| 5230 | } while (likely(size)); |
| 5231 | } |
| 5232 | |
| 5233 | /* Note that interrupts must be enabled when calling this function. */ |
| 5234 | void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) |
| 5235 | { |
| 5236 | if (!size) |
| 5237 | return; |
| 5238 | |
| 5239 | do { |
| 5240 | struct detached_freelist df; |
| 5241 | |
| 5242 | size = build_detached_freelist(s, size, p, &df); |
| 5243 | if (!df.slab) |
| 5244 | continue; |
| 5245 | |
| 5246 | slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size], |
| 5247 | df.cnt, _RET_IP_); |
| 5248 | } while (likely(size)); |
| 5249 | } |
| 5250 | EXPORT_SYMBOL(kmem_cache_free_bulk); |
| 5251 | |
| 5252 | #ifndef CONFIG_SLUB_TINY |
| 5253 | static inline |
| 5254 | int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, |
| 5255 | void **p) |
| 5256 | { |
| 5257 | struct kmem_cache_cpu *c; |
| 5258 | unsigned long irqflags; |
| 5259 | int i; |
| 5260 | |
| 5261 | /* |
| 5262 | * Drain objects in the per cpu slab, while disabling local |
| 5263 | * IRQs, which protects against PREEMPT and interrupt |
| 5264 | * handlers invoking the normal fastpath. |
| 5265 | */ |
| 5266 | c = slub_get_cpu_ptr(s->cpu_slab); |
| 5267 | local_lock_irqsave(&s->cpu_slab->lock, irqflags); |
| 5268 | |
| 5269 | for (i = 0; i < size; i++) { |
| 5270 | void *object = kfence_alloc(s, s->object_size, flags); |
| 5271 | |
| 5272 | if (unlikely(object)) { |
| 5273 | p[i] = object; |
| 5274 | continue; |
| 5275 | } |
| 5276 | |
| 5277 | object = c->freelist; |
| 5278 | if (unlikely(!object)) { |
| 5279 | /* |
| 5280 | * We may have removed an object from c->freelist using |
| 5281 | * the fastpath in the previous iteration; in that case, |
| 5282 | * c->tid has not been bumped yet. |
| 5283 | * Since ___slab_alloc() may reenable interrupts while |
| 5284 | * allocating memory, we should bump c->tid now. |
| 5285 | */ |
| 5286 | c->tid = next_tid(c->tid); |
| 5287 | |
| 5288 | local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); |
| 5289 | |
| 5290 | /* |
| 5291 | * Invoking the slow path likely has the side-effect |
| 5292 | * of re-populating the per CPU c->freelist |
| 5293 | */ |
| 5294 | p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, |
| 5295 | _RET_IP_, c, s->object_size); |
| 5296 | if (unlikely(!p[i])) |
| 5297 | goto error; |
| 5298 | |
| 5299 | c = this_cpu_ptr(s->cpu_slab); |
| 5300 | maybe_wipe_obj_freeptr(s, p[i]); |
| 5301 | |
| 5302 | local_lock_irqsave(&s->cpu_slab->lock, irqflags); |
| 5303 | |
| 5304 | continue; /* goto for-loop */ |
| 5305 | } |
| 5306 | c->freelist = get_freepointer(s, object); |
| 5307 | p[i] = object; |
| 5308 | maybe_wipe_obj_freeptr(s, p[i]); |
| 5309 | stat(s, ALLOC_FASTPATH); |
| 5310 | } |
| 5311 | c->tid = next_tid(c->tid); |
| 5312 | local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); |
| 5313 | slub_put_cpu_ptr(s->cpu_slab); |
| 5314 | |
| 5315 | return i; |
| 5316 | |
| 5317 | error: |
| 5318 | slub_put_cpu_ptr(s->cpu_slab); |
| 5319 | __kmem_cache_free_bulk(s, i, p); |
| 5320 | return 0; |
| 5321 | |
| 5322 | } |
| 5323 | #else /* CONFIG_SLUB_TINY */ |
| 5324 | static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, |
| 5325 | size_t size, void **p) |
| 5326 | { |
| 5327 | int i; |
| 5328 | |
| 5329 | for (i = 0; i < size; i++) { |
| 5330 | void *object = kfence_alloc(s, s->object_size, flags); |
| 5331 | |
| 5332 | if (unlikely(object)) { |
| 5333 | p[i] = object; |
| 5334 | continue; |
| 5335 | } |
| 5336 | |
| 5337 | p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE, |
| 5338 | _RET_IP_, s->object_size); |
| 5339 | if (unlikely(!p[i])) |
| 5340 | goto error; |
| 5341 | |
| 5342 | maybe_wipe_obj_freeptr(s, p[i]); |
| 5343 | } |
| 5344 | |
| 5345 | return i; |
| 5346 | |
| 5347 | error: |
| 5348 | __kmem_cache_free_bulk(s, i, p); |
| 5349 | return 0; |
| 5350 | } |
| 5351 | #endif /* CONFIG_SLUB_TINY */ |
| 5352 | |
| 5353 | /* Note that interrupts must be enabled when calling this function. */ |
| 5354 | int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, |
| 5355 | void **p) |
| 5356 | { |
| 5357 | int i; |
| 5358 | |
| 5359 | if (!size) |
| 5360 | return 0; |
| 5361 | |
| 5362 | s = slab_pre_alloc_hook(s, flags); |
| 5363 | if (unlikely(!s)) |
| 5364 | return 0; |
| 5365 | |
| 5366 | i = __kmem_cache_alloc_bulk(s, flags, size, p); |
| 5367 | if (unlikely(i == 0)) |
| 5368 | return 0; |
| 5369 | |
| 5370 | /* |
| 5371 | * memcg and kmem_cache debug support and memory initialization. |
| 5372 | * Done outside of the IRQ disabled fastpath loop. |
| 5373 | */ |
| 5374 | if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p, |
| 5375 | slab_want_init_on_alloc(flags, s), s->object_size))) { |
| 5376 | return 0; |
| 5377 | } |
| 5378 | return i; |
| 5379 | } |
| 5380 | EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof); |
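| | |
| | /* |
| |  * Illustrative (hypothetical) bulk usage sketch: callers typically allocate a |
| |  * batch of objects in one call and later return them in one call as well: |
| |  * |
| |  *	void *objs[16]; |
| |  *	int n = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs); |
| |  * |
| |  *	if (n) |
| |  *		kmem_cache_free_bulk(my_cache, n, objs); |
| |  */ |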
| 5381 | |
| 5382 | |
| 5383 | /* |
| 5384 | * Object placement in a slab is made very easy because we always start at |
| 5385 | * offset 0. If we tune the size of the object to the alignment then we can |
| 5386 | * get the required alignment by putting one properly sized object after |
| 5387 | * another. |
| 5388 | * |
| 5389 | * Notice that the allocation order determines the sizes of the per cpu |
| 5390 | * caches. Each processor always has one slab available for allocations. |
| 5391 | * Increasing the allocation order reduces the number of times that slabs |
| 5392 | * must be moved on and off the partial lists and is therefore a factor in |
| 5393 | * locking overhead. |
| 5394 | */ |
| 5395 | |
| 5396 | /* |
| 5397 | * Minimum / Maximum order of slab pages. This influences locking overhead |
| 5398 | * and slab fragmentation. A higher order reduces the number of partial slabs |
| 5399 | * and increases the number of allocations possible without having to |
| 5400 | * take the list_lock. |
| 5401 | */ |
| 5402 | static unsigned int slub_min_order; |
| 5403 | static unsigned int slub_max_order = |
| 5404 | IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER; |
| 5405 | static unsigned int slub_min_objects; |
| 5406 | |
| 5407 | /* |
| 5408 | * Calculate the order of allocation given a slab object size. |
| 5409 | * |
| 5410 | * The order of allocation has significant impact on performance and other |
| 5411 | * system components. Generally order 0 allocations should be preferred since |
| 5412 | * order 0 does not cause fragmentation in the page allocator. Larger objects |
| 5413 | * can be problematic to put into order 0 slabs because there may be too much |
| 5414 | * unused space left. We go to a higher order if more than 1/16th of the slab |
| 5415 | * would be wasted. |
| 5416 | * |
| 5417 | * In order to reach satisfactory performance we must ensure that a minimum |
| 5418 | * number of objects is in one slab. Otherwise we may generate too much |
| 5419 | * activity on the partial lists which requires taking the list_lock. This is |
| 5420 | * less a concern for large slabs though which are rarely used. |
| 5421 | * |
| 5422 | * slab_max_order specifies the order where we begin to stop considering the |
| 5423 | * number of objects in a slab as critical. If we reach slab_max_order then |
| 5424 | * we try to keep the page order as low as possible. So we accept more waste |
| 5425 | * of space in favor of a small page order. |
| 5426 | * |
| 5427 | * Higher order allocations also allow the placement of more objects in a |
| 5428 | * slab and thereby reduce object handling overhead. If the user has |
| 5429 | * requested a higher minimum order then we start with that one instead of |
| 5430 | * the smallest order which will fit the object. |
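| | * |
| | * Illustrative example (assuming 4KiB pages): for a 700 byte object, an |
| | * order-0 slab holds 5 objects and wastes 596 bytes, which is more than |
| | * 4096/16, so order 0 is rejected; an order-1 slab holds 11 objects and |
| | * wastes 492 bytes, which is within 8192/16, so order 1 is chosen. |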
| 5431 | */ |
| 5432 | static inline unsigned int calc_slab_order(unsigned int size, |
| 5433 | unsigned int min_order, unsigned int max_order, |
| 5434 | unsigned int fract_leftover) |
| 5435 | { |
| 5436 | unsigned int order; |
| 5437 | |
| 5438 | for (order = min_order; order <= max_order; order++) { |
| 5439 | |
| 5440 | unsigned int slab_size = (unsigned int)PAGE_SIZE << order; |
| 5441 | unsigned int rem; |
| 5442 | |
| 5443 | rem = slab_size % size; |
| 5444 | |
| 5445 | if (rem <= slab_size / fract_leftover) |
| 5446 | break; |
| 5447 | } |
| 5448 | |
| 5449 | return order; |
| 5450 | } |
| 5451 | |
| 5452 | static inline int calculate_order(unsigned int size) |
| 5453 | { |
| 5454 | unsigned int order; |
| 5455 | unsigned int min_objects; |
| 5456 | unsigned int max_objects; |
| 5457 | unsigned int min_order; |
| 5458 | |
| 5459 | min_objects = slub_min_objects; |
| 5460 | if (!min_objects) { |
| 5461 | /* |
| 5462 | * Some architectures will only update present cpus when |
| 5463 | * onlining them, so don't trust the number if it's just 1. But |
| 5464 | * we also don't want to use nr_cpu_ids always, as on some other |
| 5465 | * architectures, there can be many possible cpus, but never |
| 5466 | * onlined. Here we compromise between trying to avoid too high |
| 5467 | * order on systems that appear larger than they are, and too |
| 5468 | * low order on systems that appear smaller than they are. |
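| | * For example, a machine reporting 16 present CPUs would get |
| | * min_objects = 4 * (fls(16) + 1) = 24. |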
| 5469 | */ |
| 5470 | unsigned int nr_cpus = num_present_cpus(); |
| 5471 | if (nr_cpus <= 1) |
| 5472 | nr_cpus = nr_cpu_ids; |
| 5473 | min_objects = 4 * (fls(nr_cpus) + 1); |
| 5474 | } |
| 5475 | /* min_objects can't be 0 because get_order(0) is undefined */ |
| 5476 | max_objects = max(order_objects(slub_max_order, size), 1U); |
| 5477 | min_objects = min(min_objects, max_objects); |
| 5478 | |
| 5479 | min_order = max_t(unsigned int, slub_min_order, |
| 5480 | get_order(min_objects * size)); |
| 5481 | if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) |
| 5482 | return get_order(size * MAX_OBJS_PER_PAGE) - 1; |
| 5483 | |
| 5484 | /* |
| 5485 | * Attempt to find best configuration for a slab. This works by first |
| 5486 | * attempting to generate a layout with the best possible configuration |
| 5487 | * and backing off gradually. |
| 5488 | * |
| 5489 | * We start with accepting at most 1/16 waste and try to find the |
| 5490 | * smallest order from min_objects-derived/slab_min_order up to |
| 5491 | * slab_max_order that will satisfy the constraint. Note that increasing |
| 5492 | * the order can only result in same or less fractional waste, not more. |
| 5493 | * |
| 5494 | * If that fails, we increase the acceptable fraction of waste and try |
| 5495 | * again. The last iteration with fraction of 1/2 would effectively |
| 5496 | * accept any waste and give us the order determined by min_objects, as |
| 5497 | * long as at least a single object fits within slab_max_order. |
| 5498 | */ |
| 5499 | for (unsigned int fraction = 16; fraction > 1; fraction /= 2) { |
| 5500 | order = calc_slab_order(size, min_order, slub_max_order, |
| 5501 | fraction); |
| 5502 | if (order <= slub_max_order) |
| 5503 | return order; |
| 5504 | } |
| 5505 | |
| 5506 | /* |
| 5507 | * Doh this slab cannot be placed using slab_max_order. |
| 5508 | */ |
| 5509 | order = get_order(size); |
| 5510 | if (order <= MAX_PAGE_ORDER) |
| 5511 | return order; |
| 5512 | return -ENOSYS; |
| 5513 | } |
| 5514 | |
| 5515 | static void |
| 5516 | init_kmem_cache_node(struct kmem_cache_node *n) |
| 5517 | { |
| 5518 | n->nr_partial = 0; |
| 5519 | spin_lock_init(&n->list_lock); |
| 5520 | INIT_LIST_HEAD(&n->partial); |
| 5521 | #ifdef CONFIG_SLUB_DEBUG |
| 5522 | atomic_long_set(&n->nr_slabs, 0); |
| 5523 | atomic_long_set(&n->total_objects, 0); |
| 5524 | INIT_LIST_HEAD(&n->full); |
| 5525 | #endif |
| 5526 | } |
| 5527 | |
| 5528 | #ifndef CONFIG_SLUB_TINY |
| 5529 | static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) |
| 5530 | { |
| 5531 | BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < |
| 5532 | NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH * |
| 5533 | sizeof(struct kmem_cache_cpu)); |
| 5534 | |
| 5535 | /* |
| 5536 | * Must align to double word boundary for the double cmpxchg |
| 5537 | * instructions to work; see __pcpu_double_call_return_bool(). |
| 5538 | */ |
| 5539 | s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), |
| 5540 | 2 * sizeof(void *)); |
| 5541 | |
| 5542 | if (!s->cpu_slab) |
| 5543 | return 0; |
| 5544 | |
| 5545 | init_kmem_cache_cpus(s); |
| 5546 | |
| 5547 | return 1; |
| 5548 | } |
| 5549 | #else |
| 5550 | static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) |
| 5551 | { |
| 5552 | return 1; |
| 5553 | } |
| 5554 | #endif /* CONFIG_SLUB_TINY */ |
| 5555 | |
| 5556 | static struct kmem_cache *kmem_cache_node; |
| 5557 | |
| 5558 | /* |
| 5559 | * No kmalloc_node yet so do it by hand. We know that this is the first |
| 5560 | * slab on the node for this slabcache. There are no concurrent accesses |
| 5561 | * possible. |
| 5562 | * |
| 5563 | * Note that this function only works on the kmem_cache_node |
| 5564 | * when allocating for the kmem_cache_node. This is used for bootstrapping |
| 5565 | * memory on a fresh node that has no slab structures yet. |
| 5566 | */ |
| 5567 | static void early_kmem_cache_node_alloc(int node) |
| 5568 | { |
| 5569 | struct slab *slab; |
| 5570 | struct kmem_cache_node *n; |
| 5571 | |
| 5572 | BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); |
| 5573 | |
| 5574 | slab = new_slab(kmem_cache_node, GFP_NOWAIT, node); |
| 5575 | |
| 5576 | BUG_ON(!slab); |
| 5577 | if (slab_nid(slab) != node) { |
| 5578 | pr_err("SLUB: Unable to allocate memory from node %d\n", node); |
| 5579 | pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); |
| 5580 | } |
| 5581 | |
| 5582 | n = slab->freelist; |
| 5583 | BUG_ON(!n); |
| 5584 | #ifdef CONFIG_SLUB_DEBUG |
| 5585 | init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); |
| 5586 | #endif |
| 5587 | n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); |
| 5588 | slab->freelist = get_freepointer(kmem_cache_node, n); |
| 5589 | slab->inuse = 1; |
| 5590 | kmem_cache_node->node[node] = n; |
| 5591 | init_kmem_cache_node(n); |
| 5592 | inc_slabs_node(kmem_cache_node, node, slab->objects); |
| 5593 | |
| 5594 | /* |
| 5595 | * No locks need to be taken here as it has just been |
| 5596 | * initialized and there is no concurrent access. |
| 5597 | */ |
| 5598 | __add_partial(n, slab, DEACTIVATE_TO_HEAD); |
| 5599 | } |
| 5600 | |
| 5601 | static void free_kmem_cache_nodes(struct kmem_cache *s) |
| 5602 | { |
| 5603 | int node; |
| 5604 | struct kmem_cache_node *n; |
| 5605 | |
| 5606 | for_each_kmem_cache_node(s, node, n) { |
| 5607 | s->node[node] = NULL; |
| 5608 | kmem_cache_free(kmem_cache_node, n); |
| 5609 | } |
| 5610 | } |
| 5611 | |
| 5612 | void __kmem_cache_release(struct kmem_cache *s) |
| 5613 | { |
| 5614 | cache_random_seq_destroy(s); |
| 5615 | #ifndef CONFIG_SLUB_TINY |
| 5616 | free_percpu(s->cpu_slab); |
| 5617 | #endif |
| 5618 | free_kmem_cache_nodes(s); |
| 5619 | } |
| 5620 | |
| 5621 | static int init_kmem_cache_nodes(struct kmem_cache *s) |
| 5622 | { |
| 5623 | int node; |
| 5624 | |
| 5625 | for_each_node_mask(node, slab_nodes) { |
| 5626 | struct kmem_cache_node *n; |
| 5627 | |
| 5628 | if (slab_state == DOWN) { |
| 5629 | early_kmem_cache_node_alloc(node); |
| 5630 | continue; |
| 5631 | } |
| 5632 | n = kmem_cache_alloc_node(kmem_cache_node, |
| 5633 | GFP_KERNEL, node); |
| 5634 | |
| 5635 | if (!n) { |
| 5636 | free_kmem_cache_nodes(s); |
| 5637 | return 0; |
| 5638 | } |
| 5639 | |
| 5640 | init_kmem_cache_node(n); |
| 5641 | s->node[node] = n; |
| 5642 | } |
| 5643 | return 1; |
| 5644 | } |
| 5645 | |
| 5646 | static void set_cpu_partial(struct kmem_cache *s) |
| 5647 | { |
| 5648 | #ifdef CONFIG_SLUB_CPU_PARTIAL |
| 5649 | unsigned int nr_objects; |
| 5650 | |
| 5651 | /* |
| 5652 | * cpu_partial determines the maximum number of objects kept in the |
| 5653 | * per cpu partial lists of a processor. |
| 5654 | * |
| 5655 | * Per cpu partial lists mainly contain slabs that just have one |
| 5656 | * object freed. If they are used for allocation then they can be |
| 5657 | * filled up again with minimal effort. The slab will never hit the |
| 5658 | * per node partial lists and therefore no locking will be required. |
| 5659 | * |
| 5660 | * For backwards compatibility reasons, this is determined as a number |
| 5661 | * of objects, even though we now limit the maximum number of pages; see |
| 5662 | * slub_set_cpu_partial(). |
| 5663 | */ |
| 5664 | if (!kmem_cache_has_cpu_partial(s)) |
| 5665 | nr_objects = 0; |
| 5666 | else if (s->size >= PAGE_SIZE) |
| 5667 | nr_objects = 6; |
| 5668 | else if (s->size >= 1024) |
| 5669 | nr_objects = 24; |
| 5670 | else if (s->size >= 256) |
| 5671 | nr_objects = 52; |
| 5672 | else |
| 5673 | nr_objects = 120; |
| 5674 | |
| 5675 | slub_set_cpu_partial(s, nr_objects); |
| 5676 | #endif |
| 5677 | } |
| 5678 | |
| 5679 | /* |
| 5680 | * calculate_sizes() determines the order and the distribution of data within |
| 5681 | * a slab object. |
| 5682 | */ |
| 5683 | static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s) |
| 5684 | { |
| 5685 | slab_flags_t flags = s->flags; |
| 5686 | unsigned int size = s->object_size; |
| 5687 | unsigned int order; |
| 5688 | |
| 5689 | /* |
| 5690 | * Round up object size to the next word boundary. We can only |
| 5691 | * place the free pointer at word boundaries and this determines |
| 5692 | * the possible location of the free pointer. |
| 5693 | */ |
| 5694 | size = ALIGN(size, sizeof(void *)); |
| 5695 | |
| 5696 | #ifdef CONFIG_SLUB_DEBUG |
| 5697 | /* |
| 5698 | * Determine if we can poison the object itself. If the user of |
| 5699 | * the slab may touch the object after free or before allocation |
| 5700 | * then we should never poison the object itself. |
| 5701 | */ |
| 5702 | if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && |
| 5703 | !s->ctor) |
| 5704 | s->flags |= __OBJECT_POISON; |
| 5705 | else |
| 5706 | s->flags &= ~__OBJECT_POISON; |
| 5707 | |
| 5708 | |
| 5709 | /* |
| 5710 | * If we are Redzoning then check if there is some space between the |
| 5711 | * end of the object and the free pointer. If not then add an |
| 5712 | * additional word to have some bytes to store Redzone information. |
| 5713 | */ |
| 5714 | if ((flags & SLAB_RED_ZONE) && size == s->object_size) |
| 5715 | size += sizeof(void *); |
| 5716 | #endif |
| 5717 | |
| 5718 | /* |
| 5719 | * With that we have determined the number of bytes in actual use |
| 5720 | * by the object and redzoning. |
| 5721 | */ |
| 5722 | s->inuse = size; |
| 5723 | |
| 5724 | if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) || |
| 5725 | (flags & SLAB_POISON) || s->ctor || |
| 5726 | ((flags & SLAB_RED_ZONE) && |
| 5727 | (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) { |
| 5728 | /* |
| 5729 | * Relocate free pointer after the object if it is not |
| 5730 | * permitted to overwrite the first word of the object on |
| 5731 | * kmem_cache_free. |
| 5732 | * |
| 5733 | * This is the case if we do RCU, have a constructor or |
| 5734 | * destructor, are poisoning the objects, or are |
| 5735 | * redzoning an object smaller than sizeof(void *) or are |
| 5736 | * redzoning an object with slub_debug_orig_size() enabled, |
| 5737 | * in which case the right redzone may be extended. |
| 5738 | * |
| 5739 | * The assumption that s->offset >= s->inuse means free |
| 5740 | * pointer is outside of the object is used in the |
| 5741 | * freeptr_outside_object() function. If that is no |
| 5742 | * longer true, the function needs to be modified. |
| 5743 | */ |
| 5744 | s->offset = size; |
| 5745 | size += sizeof(void *); |
| 5746 | } else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) { |
| 5747 | s->offset = args->freeptr_offset; |
| 5748 | } else { |
| 5749 | /* |
| 5750 | * Store freelist pointer near middle of object to keep |
| 5751 | * it away from the edges of the object to avoid small |
| 5752 | * sized over/underflows from neighboring allocations. |
| 5753 | */ |
| 5754 | s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); |
| 5755 | } |
| 5756 | |
| 5757 | #ifdef CONFIG_SLUB_DEBUG |
| 5758 | if (flags & SLAB_STORE_USER) { |
| 5759 | /* |
| 5760 | * Need to store information about allocs and frees after |
| 5761 | * the object. |
| 5762 | */ |
| 5763 | size += 2 * sizeof(struct track); |
| 5764 | |
| 5765 | /* Save the original kmalloc request size */ |
| 5766 | if (flags & SLAB_KMALLOC) |
| 5767 | size += sizeof(unsigned int); |
| 5768 | } |
| 5769 | #endif |
| 5770 | |
| 5771 | kasan_cache_create(s, &size, &s->flags); |
| 5772 | #ifdef CONFIG_SLUB_DEBUG |
| 5773 | if (flags & SLAB_RED_ZONE) { |
| 5774 | /* |
| 5775 | * Add some empty padding so that we can catch |
| 5776 | * overwrites from earlier objects rather than let |
| 5777 | * tracking information or the free pointer be |
| 5778 | * corrupted if a user writes before the start |
| 5779 | * of the object. |
| 5780 | */ |
| 5781 | size += sizeof(void *); |
| 5782 | |
| 5783 | s->red_left_pad = sizeof(void *); |
| 5784 | s->red_left_pad = ALIGN(s->red_left_pad, s->align); |
| 5785 | size += s->red_left_pad; |
| 5786 | } |
| 5787 | #endif |
| 5788 | |
| 5789 | /* |
| 5790 | * SLUB stores one object immediately after another beginning from |
| 5791 | * offset 0. In order to align the objects we have to simply size |
| 5792 | * each object to conform to the alignment. |
| 5793 | */ |
| 5794 | size = ALIGN(size, s->align); |
| 5795 | s->size = size; |
| 5796 | s->reciprocal_size = reciprocal_value(size); |
| 5797 | order = calculate_order(size); |
| 5798 | |
| 5799 | if ((int)order < 0) |
| 5800 | return 0; |
| 5801 | |
| 5802 | s->allocflags = __GFP_COMP; |
| 5803 | |
| 5804 | if (s->flags & SLAB_CACHE_DMA) |
| 5805 | s->allocflags |= GFP_DMA; |
| 5806 | |
| 5807 | if (s->flags & SLAB_CACHE_DMA32) |
| 5808 | s->allocflags |= GFP_DMA32; |
| 5809 | |
| 5810 | if (s->flags & SLAB_RECLAIM_ACCOUNT) |
| 5811 | s->allocflags |= __GFP_RECLAIMABLE; |
| 5812 | |
| 5813 | /* |
| 5814 | * Determine the number of objects per slab |
| 5815 | */ |
| 5816 | s->oo = oo_make(order, size); |
| 5817 | s->min = oo_make(get_order(size), size); |
| 5818 | |
| 5819 | return !!oo_objects(s->oo); |
| 5820 | } |
| 5821 | |
| 5822 | static void list_slab_objects(struct kmem_cache *s, struct slab *slab) |
| 5823 | { |
| 5824 | #ifdef CONFIG_SLUB_DEBUG |
| 5825 | void *addr = slab_address(slab); |
| 5826 | void *p; |
| 5827 | |
| 5828 | if (!slab_add_kunit_errors()) |
| 5829 | slab_bug(s, "Objects remaining on __kmem_cache_shutdown()"); |
| 5830 | |
| 5831 | spin_lock(&object_map_lock); |
| 5832 | __fill_map(object_map, s, slab); |
| 5833 | |
| 5834 | for_each_object(p, s, addr, slab->objects) { |
| 5835 | |
| 5836 | if (!test_bit(__obj_to_index(s, addr, p), object_map)) { |
| 5837 | if (slab_add_kunit_errors()) |
| 5838 | continue; |
| 5839 | pr_err("Object 0x%p @offset=%tu\n", p, p - addr); |
| 5840 | print_tracking(s, p); |
| 5841 | } |
| 5842 | } |
| 5843 | spin_unlock(&object_map_lock); |
| 5844 | |
| 5845 | __slab_err(slab); |
| 5846 | #endif |
| 5847 | } |
| 5848 | |
| 5849 | /* |
| 5850 | * Attempt to free all partial slabs on a node. |
| 5851 | * This is called from __kmem_cache_shutdown(). We must take list_lock |
| 5852 | * because sysfs files might still access the partial list during shutdown. |
| 5853 | */ |
| 5854 | static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) |
| 5855 | { |
| 5856 | LIST_HEAD(discard); |
| 5857 | struct slab *slab, *h; |
| 5858 | |
| 5859 | BUG_ON(irqs_disabled()); |
| 5860 | spin_lock_irq(&n->list_lock); |
| 5861 | list_for_each_entry_safe(slab, h, &n->partial, slab_list) { |
| 5862 | if (!slab->inuse) { |
| 5863 | remove_partial(n, slab); |
| 5864 | list_add(&slab->slab_list, &discard); |
| 5865 | } else { |
| 5866 | list_slab_objects(s, slab); |
| 5867 | } |
| 5868 | } |
| 5869 | spin_unlock_irq(&n->list_lock); |
| 5870 | |
| 5871 | list_for_each_entry_safe(slab, h, &discard, slab_list) |
| 5872 | discard_slab(s, slab); |
| 5873 | } |
| 5874 | |
| 5875 | bool __kmem_cache_empty(struct kmem_cache *s) |
| 5876 | { |
| 5877 | int node; |
| 5878 | struct kmem_cache_node *n; |
| 5879 | |
| 5880 | for_each_kmem_cache_node(s, node, n) |
| 5881 | if (n->nr_partial || node_nr_slabs(n)) |
| 5882 | return false; |
| 5883 | return true; |
| 5884 | } |
| 5885 | |
| 5886 | /* |
| 5887 | * Release all resources used by a slab cache. |
| 5888 | */ |
| 5889 | int __kmem_cache_shutdown(struct kmem_cache *s) |
| 5890 | { |
| 5891 | int node; |
| 5892 | struct kmem_cache_node *n; |
| 5893 | |
| 5894 | flush_all_cpus_locked(s); |
| 5895 | /* Attempt to free all objects */ |
| 5896 | for_each_kmem_cache_node(s, node, n) { |
| 5897 | free_partial(s, n); |
| 5898 | if (n->nr_partial || node_nr_slabs(n)) |
| 5899 | return 1; |
| 5900 | } |
| 5901 | return 0; |
| 5902 | } |
| 5903 | |
| 5904 | #ifdef CONFIG_PRINTK |
| 5905 | void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) |
| 5906 | { |
| 5907 | void *base; |
| 5908 | int __maybe_unused i; |
| 5909 | unsigned int objnr; |
| 5910 | void *objp; |
| 5911 | void *objp0; |
| 5912 | struct kmem_cache *s = slab->slab_cache; |
| 5913 | struct track __maybe_unused *trackp; |
| 5914 | |
| 5915 | kpp->kp_ptr = object; |
| 5916 | kpp->kp_slab = slab; |
| 5917 | kpp->kp_slab_cache = s; |
| 5918 | base = slab_address(slab); |
| 5919 | objp0 = kasan_reset_tag(object); |
| 5920 | #ifdef CONFIG_SLUB_DEBUG |
| 5921 | objp = restore_red_left(s, objp0); |
| 5922 | #else |
| 5923 | objp = objp0; |
| 5924 | #endif |
| 5925 | objnr = obj_to_index(s, slab, objp); |
| 5926 | kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); |
| 5927 | objp = base + s->size * objnr; |
| 5928 | kpp->kp_objp = objp; |
| 5929 | if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size |
| 5930 | || (objp - base) % s->size) || |
| 5931 | !(s->flags & SLAB_STORE_USER)) |
| 5932 | return; |
| 5933 | #ifdef CONFIG_SLUB_DEBUG |
| 5934 | objp = fixup_red_left(s, objp); |
| 5935 | trackp = get_track(s, objp, TRACK_ALLOC); |
| 5936 | kpp->kp_ret = (void *)trackp->addr; |
| 5937 | #ifdef CONFIG_STACKDEPOT |
| 5938 | { |
| 5939 | depot_stack_handle_t handle; |
| 5940 | unsigned long *entries; |
| 5941 | unsigned int nr_entries; |
| 5942 | |
| 5943 | handle = READ_ONCE(trackp->handle); |
| 5944 | if (handle) { |
| 5945 | nr_entries = stack_depot_fetch(handle, &entries); |
| 5946 | for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) |
| 5947 | kpp->kp_stack[i] = (void *)entries[i]; |
| 5948 | } |
| 5949 | |
| 5950 | trackp = get_track(s, objp, TRACK_FREE); |
| 5951 | handle = READ_ONCE(trackp->handle); |
| 5952 | if (handle) { |
| 5953 | nr_entries = stack_depot_fetch(handle, &entries); |
| 5954 | for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) |
| 5955 | kpp->kp_free_stack[i] = (void *)entries[i]; |
| 5956 | } |
| 5957 | } |
| 5958 | #endif |
| 5959 | #endif |
| 5960 | } |
| 5961 | #endif |
| 5962 | |
| 5963 | /******************************************************************** |
| 5964 | * Kmalloc subsystem |
| 5965 | *******************************************************************/ |
| 5966 | |
| 5967 | static int __init setup_slub_min_order(char *str) |
| 5968 | { |
| 5969 | get_option(&str, (int *)&slub_min_order); |
| 5970 | |
| 5971 | if (slub_min_order > slub_max_order) |
| 5972 | slub_max_order = slub_min_order; |
| 5973 | |
| 5974 | return 1; |
| 5975 | } |
| 5976 | |
| 5977 | __setup("slab_min_order=", setup_slub_min_order); |
| 5978 | __setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0); |
| 5979 | |
| 5980 | |
| 5981 | static int __init setup_slub_max_order(char *str) |
| 5982 | { |
| 5983 | get_option(&str, (int *)&slub_max_order); |
| 5984 | slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER); |
| 5985 | |
| 5986 | if (slub_min_order > slub_max_order) |
| 5987 | slub_min_order = slub_max_order; |
| 5988 | |
| 5989 | return 1; |
| 5990 | } |
| 5991 | |
| 5992 | __setup("slab_max_order=", setup_slub_max_order); |
| 5993 | __setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0); |
| 5994 | |
| 5995 | static int __init setup_slub_min_objects(char *str) |
| 5996 | { |
| 5997 | get_option(&str, (int *)&slub_min_objects); |
| 5998 | |
| 5999 | return 1; |
| 6000 | } |
| 6001 | |
| 6002 | __setup("slab_min_objects=", setup_slub_min_objects); |
| 6003 | __setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0); |
| 6004 | |
| 6005 | #ifdef CONFIG_NUMA |
| 6006 | static int __init setup_slab_strict_numa(char *str) |
| 6007 | { |
| 6008 | if (nr_node_ids > 1) { |
| 6009 | static_branch_enable(&strict_numa); |
| 6010 | pr_info("SLUB: Strict NUMA enabled.\n"); |
| 6011 | } else { |
| 6012 | pr_warn("slab_strict_numa parameter set on non NUMA system.\n"); |
| 6013 | } |
| 6014 | |
| 6015 | return 1; |
| 6016 | } |
| 6017 | |
| 6018 | __setup("slab_strict_numa", setup_slab_strict_numa); |
| 6019 | #endif |
| 6020 | |
| 6021 | |
| 6022 | #ifdef CONFIG_HARDENED_USERCOPY |
| 6023 | /* |
| 6024 | * Rejects incorrectly sized objects and objects that are to be copied |
| 6025 | * to/from userspace but do not fall entirely within the containing slab |
| 6026 | * cache's usercopy region. |
| 6027 | * |
| 6028 | * Returns normally if the check passes; otherwise reports the error via |
| 6029 | * usercopy_abort(), naming the offending cache. |
| 6030 | */ |
| 6031 | void __check_heap_object(const void *ptr, unsigned long n, |
| 6032 | const struct slab *slab, bool to_user) |
| 6033 | { |
| 6034 | struct kmem_cache *s; |
| 6035 | unsigned int offset; |
| 6036 | bool is_kfence = is_kfence_address(ptr); |
| 6037 | |
| 6038 | ptr = kasan_reset_tag(ptr); |
| 6039 | |
| 6040 | /* Find object and usable object size. */ |
| 6041 | s = slab->slab_cache; |
| 6042 | |
| 6043 | /* Reject impossible pointers. */ |
| 6044 | if (ptr < slab_address(slab)) |
| 6045 | usercopy_abort("SLUB object not in SLUB page?!", NULL, |
| 6046 | to_user, 0, n); |
| 6047 | |
| 6048 | /* Find offset within object. */ |
| 6049 | if (is_kfence) |
| 6050 | offset = ptr - kfence_object_start(ptr); |
| 6051 | else |
| 6052 | offset = (ptr - slab_address(slab)) % s->size; |
| 6053 | |
| 6054 | /* Adjust for redzone and reject if within the redzone. */ |
| 6055 | if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { |
| 6056 | if (offset < s->red_left_pad) |
| 6057 | usercopy_abort("SLUB object in left red zone", |
| 6058 | s->name, to_user, offset, n); |
| 6059 | offset -= s->red_left_pad; |
| 6060 | } |
| 6061 | |
| 6062 | /* Allow address range falling entirely within usercopy region. */ |
| 6063 | if (offset >= s->useroffset && |
| 6064 | offset - s->useroffset <= s->usersize && |
| 6065 | n <= s->useroffset - offset + s->usersize) |
| 6066 | return; |
| 6067 | |
| 6068 | usercopy_abort("SLUB object", s->name, to_user, offset, n); |
| 6069 | } |
| 6070 | #endif /* CONFIG_HARDENED_USERCOPY */ |
| 6071 | |
| 6072 | #define SHRINK_PROMOTE_MAX 32 |
| 6073 | |
| 6074 | /* |
| 6075 | * kmem_cache_shrink discards empty slabs and promotes the slabs filled |
| 6076 | * up most to the head of the partial lists. New allocations will then |
| 6077 | * fill those up and thus they can be removed from the partial lists. |
| 6078 | * |
| 6079 | * The slabs with the least items are placed last. This results in them |
| 6080 | * being allocated from last, increasing the chance that the last objects |
| 6081 | * are freed in them. |
| 6082 | */ |
| 6083 | static int __kmem_cache_do_shrink(struct kmem_cache *s) |
| 6084 | { |
| 6085 | int node; |
| 6086 | int i; |
| 6087 | struct kmem_cache_node *n; |
| 6088 | struct slab *slab; |
| 6089 | struct slab *t; |
| 6090 | struct list_head discard; |
| 6091 | struct list_head promote[SHRINK_PROMOTE_MAX]; |
| 6092 | unsigned long flags; |
| 6093 | int ret = 0; |
| 6094 | |
| 6095 | for_each_kmem_cache_node(s, node, n) { |
| 6096 | INIT_LIST_HEAD(&discard); |
| 6097 | for (i = 0; i < SHRINK_PROMOTE_MAX; i++) |
| 6098 | INIT_LIST_HEAD(promote + i); |
| 6099 | |
| 6100 | spin_lock_irqsave(&n->list_lock, flags); |
| 6101 | |
| 6102 | /* |
| 6103 | * Build lists of slabs to discard or promote. |
| 6104 | * |
| 6105 | * Note that concurrent frees may occur while we hold the |
| 6106 | * list_lock. slab->inuse here is the upper limit. |
| 6107 | */ |
| 6108 | list_for_each_entry_safe(slab, t, &n->partial, slab_list) { |
| 6109 | int free = slab->objects - slab->inuse; |
| 6110 | |
| 6111 | /* Do not reread slab->inuse */ |
| 6112 | barrier(); |
| 6113 | |
| 6114 | /* We do not keep full slabs on the list */ |
| 6115 | BUG_ON(free <= 0); |
| 6116 | |
| 6117 | if (free == slab->objects) { |
| 6118 | list_move(&slab->slab_list, &discard); |
| 6119 | slab_clear_node_partial(slab); |
| 6120 | n->nr_partial--; |
| 6121 | dec_slabs_node(s, node, slab->objects); |
| 6122 | } else if (free <= SHRINK_PROMOTE_MAX) |
| 6123 | list_move(&slab->slab_list, promote + free - 1); |
| 6124 | } |
| 6125 | |
| 6126 | /* |
| 6127 | * Promote the slabs filled up most to the head of the |
| 6128 | * partial list. |
| 6129 | */ |
| 6130 | for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) |
| 6131 | list_splice(promote + i, &n->partial); |
| 6132 | |
| 6133 | spin_unlock_irqrestore(&n->list_lock, flags); |
| 6134 | |
| 6135 | /* Release empty slabs */ |
| 6136 | list_for_each_entry_safe(slab, t, &discard, slab_list) |
| 6137 | free_slab(s, slab); |
| 6138 | |
| 6139 | if (node_nr_slabs(n)) |
| 6140 | ret = 1; |
| 6141 | } |
| 6142 | |
| 6143 | return ret; |
| 6144 | } |
| 6145 | |
| 6146 | int __kmem_cache_shrink(struct kmem_cache *s) |
| 6147 | { |
| 6148 | flush_all(s); |
| 6149 | return __kmem_cache_do_shrink(s); |
| 6150 | } |
| 6151 | |
| 6152 | static int slab_mem_going_offline_callback(void *arg) |
| 6153 | { |
| 6154 | struct kmem_cache *s; |
| 6155 | |
| 6156 | mutex_lock(&slab_mutex); |
| 6157 | list_for_each_entry(s, &slab_caches, list) { |
| 6158 | flush_all_cpus_locked(s); |
| 6159 | __kmem_cache_do_shrink(s); |
| 6160 | } |
| 6161 | mutex_unlock(&slab_mutex); |
| 6162 | |
| 6163 | return 0; |
| 6164 | } |
| 6165 | |
| 6166 | static void slab_mem_offline_callback(void *arg) |
| 6167 | { |
| 6168 | struct memory_notify *marg = arg; |
| 6169 | int offline_node; |
| 6170 | |
| 6171 | offline_node = marg->status_change_nid_normal; |
| 6172 | |
| 6173 | /* |
| 6174 | * If the node still has available memory, we still need the |
| 6175 | * kmem_cache_node for it, so there is nothing to do here. |
| 6176 | */ |
| 6177 | if (offline_node < 0) |
| 6178 | return; |
| 6179 | |
| 6180 | mutex_lock(&slab_mutex); |
| 6181 | node_clear(offline_node, slab_nodes); |
| 6182 | /* |
| 6183 | * We no longer free kmem_cache_node structures here, as it would be |
| 6184 | * racy with all get_node() users, and infeasible to protect them with |
| 6185 | * slab_mutex. |
| 6186 | */ |
| 6187 | mutex_unlock(&slab_mutex); |
| 6188 | } |
| 6189 | |
| 6190 | static int slab_mem_going_online_callback(void *arg) |
| 6191 | { |
| 6192 | struct kmem_cache_node *n; |
| 6193 | struct kmem_cache *s; |
| 6194 | struct memory_notify *marg = arg; |
| 6195 | int nid = marg->status_change_nid_normal; |
| 6196 | int ret = 0; |
| 6197 | |
| 6198 | /* |
| 6199 | * If the node's memory is already available, then kmem_cache_node is |
| 6200 | * already created. Nothing to do. |
| 6201 | */ |
| 6202 | if (nid < 0) |
| 6203 | return 0; |
| 6204 | |
| 6205 | /* |
| 6206 | * We are bringing a node online. No memory is available yet. We must |
| 6207 | * allocate a kmem_cache_node structure in order to bring the node |
| 6208 | * online. |
| 6209 | */ |
| 6210 | mutex_lock(&slab_mutex); |
| 6211 | list_for_each_entry(s, &slab_caches, list) { |
| 6212 | /* |
| 6213 | * The structure may already exist if the node was previously |
| 6214 | * onlined and offlined. |
| 6215 | */ |
| 6216 | if (get_node(s, nid)) |
| 6217 | continue; |
| 6218 | /* |
| 6219 | * XXX: kmem_cache_alloc_node will fall back to other nodes |
| 6220 | * since memory is not yet available from the node that |
| 6221 | * is brought up. |
| 6222 | */ |
| 6223 | n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); |
| 6224 | if (!n) { |
| 6225 | ret = -ENOMEM; |
| 6226 | goto out; |
| 6227 | } |
| 6228 | init_kmem_cache_node(n); |
| 6229 | s->node[nid] = n; |
| 6230 | } |
| 6231 | /* |
| 6232 | * Any cache created after this point will also have kmem_cache_node |
| 6233 | * initialized for the new node. |
| 6234 | */ |
| 6235 | node_set(nid, slab_nodes); |
| 6236 | out: |
| 6237 | mutex_unlock(&slab_mutex); |
| 6238 | return ret; |
| 6239 | } |
| 6240 | |
| 6241 | static int slab_memory_callback(struct notifier_block *self, |
| 6242 | unsigned long action, void *arg) |
| 6243 | { |
| 6244 | int ret = 0; |
| 6245 | |
| 6246 | switch (action) { |
| 6247 | case MEM_GOING_ONLINE: |
| 6248 | ret = slab_mem_going_online_callback(arg); |
| 6249 | break; |
| 6250 | case MEM_GOING_OFFLINE: |
| 6251 | ret = slab_mem_going_offline_callback(arg); |
| 6252 | break; |
| 6253 | case MEM_OFFLINE: |
| 6254 | case MEM_CANCEL_ONLINE: |
| 6255 | slab_mem_offline_callback(arg); |
| 6256 | break; |
| 6257 | case MEM_ONLINE: |
| 6258 | case MEM_CANCEL_OFFLINE: |
| 6259 | break; |
| 6260 | } |
| 6261 | if (ret) |
| 6262 | ret = notifier_from_errno(ret); |
| 6263 | else |
| 6264 | ret = NOTIFY_OK; |
| 6265 | return ret; |
| 6266 | } |
| 6267 | |
| 6268 | /******************************************************************** |
| 6269 | * Basic setup of slabs |
| 6270 | *******************************************************************/ |
| 6271 | |
| 6272 | /* |
| 6273 | * Used for early kmem_cache structures that were allocated using |
| 6274 | * the page allocator. Allocate them properly then fix up the pointers |
| 6275 | * that may be pointing to the wrong kmem_cache structure. |
| 6276 | */ |
| 6277 | |
| 6278 | static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) |
| 6279 | { |
| 6280 | int node; |
| 6281 | struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); |
| 6282 | struct kmem_cache_node *n; |
| 6283 | |
| 6284 | memcpy(s, static_cache, kmem_cache->object_size); |
| 6285 | |
| 6286 | /* |
| 6287 | * This runs very early, and only the boot processor is supposed to be |
| 6288 | * up. Even if it weren't true, IRQs are not up so we couldn't fire |
| 6289 | * IPIs around. |
| 6290 | */ |
| 6291 | __flush_cpu_slab(s, smp_processor_id()); |
| 6292 | for_each_kmem_cache_node(s, node, n) { |
| 6293 | struct slab *p; |
| 6294 | |
| 6295 | list_for_each_entry(p, &n->partial, slab_list) |
| 6296 | p->slab_cache = s; |
| 6297 | |
| 6298 | #ifdef CONFIG_SLUB_DEBUG |
| 6299 | list_for_each_entry(p, &n->full, slab_list) |
| 6300 | p->slab_cache = s; |
| 6301 | #endif |
| 6302 | } |
| 6303 | list_add(&s->list, &slab_caches); |
| 6304 | return s; |
| 6305 | } |
| 6306 | |
| 6307 | void __init kmem_cache_init(void) |
| 6308 | { |
| 6309 | static __initdata struct kmem_cache boot_kmem_cache, |
| 6310 | boot_kmem_cache_node; |
| 6311 | int node; |
| 6312 | |
| 6313 | if (debug_guardpage_minorder()) |
| 6314 | slub_max_order = 0; |
| 6315 | |
| 6316 | /* Print slub debugging pointers without hashing */ |
| 6317 | if (__slub_debug_enabled()) |
| 6318 | no_hash_pointers_enable(NULL); |
| 6319 | |
| 6320 | kmem_cache_node = &boot_kmem_cache_node; |
| 6321 | kmem_cache = &boot_kmem_cache; |
| 6322 | |
| 6323 | /* |
| 6324 | * Initialize the nodemask for which we will allocate per node |
| 6325 | * structures. Here we don't need to take the slab_mutex yet. |
| 6326 | */ |
| 6327 | for_each_node_state(node, N_NORMAL_MEMORY) |
| 6328 | node_set(node, slab_nodes); |
| 6329 | |
| 6330 | create_boot_cache(kmem_cache_node, "kmem_cache_node", |
| 6331 | sizeof(struct kmem_cache_node), |
| 6332 | SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); |
| 6333 | |
| 6334 | hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); |
| 6335 | |
	/* Now able to allocate the per-node structures */
| 6337 | slab_state = PARTIAL; |
| 6338 | |
| 6339 | create_boot_cache(kmem_cache, "kmem_cache", |
| 6340 | offsetof(struct kmem_cache, node) + |
| 6341 | nr_node_ids * sizeof(struct kmem_cache_node *), |
| 6342 | SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); |
| 6343 | |
| 6344 | kmem_cache = bootstrap(&boot_kmem_cache); |
| 6345 | kmem_cache_node = bootstrap(&boot_kmem_cache_node); |
| 6346 | |
| 6347 | /* Now we can use the kmem_cache to allocate kmalloc slabs */ |
| 6348 | setup_kmalloc_cache_index_table(); |
| 6349 | create_kmalloc_caches(); |
| 6350 | |
| 6351 | /* Setup random freelists for each cache */ |
| 6352 | init_freelist_randomization(); |
| 6353 | |
| 6354 | cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, |
| 6355 | slub_cpu_dead); |
| 6356 | |
| 6357 | pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", |
| 6358 | cache_line_size(), |
| 6359 | slub_min_order, slub_max_order, slub_min_objects, |
| 6360 | nr_cpu_ids, nr_node_ids); |
| 6361 | } |
| 6362 | |
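/*
 * Late initialization: allocate the workqueue used for deferred flushing
 * of per-CPU slabs, once workqueues are fully usable.
 */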
| 6363 | void __init kmem_cache_init_late(void) |
| 6364 | { |
| 6365 | #ifndef CONFIG_SLUB_TINY |
| 6366 | flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0); |
| 6367 | WARN_ON(!flushwq); |
| 6368 | #endif |
| 6369 | } |
| 6370 | |
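/*
 * Try to satisfy a cache creation request by aliasing an existing,
 * mergeable cache. On success the existing cache's refcount is bumped,
 * its sizes are widened as needed and a sysfs alias is added for the
 * requested name.
 */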
| 6371 | struct kmem_cache * |
| 6372 | __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, |
| 6373 | slab_flags_t flags, void (*ctor)(void *)) |
| 6374 | { |
| 6375 | struct kmem_cache *s; |
| 6376 | |
| 6377 | s = find_mergeable(size, align, flags, name, ctor); |
| 6378 | if (s) { |
| 6379 | if (sysfs_slab_alias(s, name)) |
| 6380 | pr_err("SLUB: Unable to add cache alias %s to sysfs\n", |
| 6381 | name); |
| 6382 | |
| 6383 | s->refcount++; |
| 6384 | |
| 6385 | /* |
| 6386 | * Adjust the object sizes so that we clear |
| 6387 | * the complete object on kzalloc. |
| 6388 | */ |
| 6389 | s->object_size = max(s->object_size, size); |
| 6390 | s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); |
| 6391 | } |
| 6392 | |
| 6393 | return s; |
| 6394 | } |
| 6395 | |
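/*
 * Set up a freshly allocated kmem_cache: compute the slab layout and
 * initialize the per-node and per-CPU structures. Returns 0 on success.
 * On failure the partially initialized cache is released and a negative
 * errno is returned.
 */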
| 6396 | int do_kmem_cache_create(struct kmem_cache *s, const char *name, |
| 6397 | unsigned int size, struct kmem_cache_args *args, |
| 6398 | slab_flags_t flags) |
| 6399 | { |
| 6400 | int err = -EINVAL; |
| 6401 | |
| 6402 | s->name = name; |
| 6403 | s->size = s->object_size = size; |
| 6404 | |
| 6405 | s->flags = kmem_cache_flags(flags, s->name); |
| 6406 | #ifdef CONFIG_SLAB_FREELIST_HARDENED |
| 6407 | s->random = get_random_long(); |
| 6408 | #endif |
| 6409 | s->align = args->align; |
| 6410 | s->ctor = args->ctor; |
| 6411 | #ifdef CONFIG_HARDENED_USERCOPY |
| 6412 | s->useroffset = args->useroffset; |
| 6413 | s->usersize = args->usersize; |
| 6414 | #endif |
| 6415 | |
| 6416 | if (!calculate_sizes(args, s)) |
| 6417 | goto out; |
| 6418 | if (disable_higher_order_debug) { |
| 6419 | /* |
| 6420 | * Disable debugging flags that store metadata if the min slab |
| 6421 | * order increased. |
| 6422 | */ |
| 6423 | if (get_order(s->size) > get_order(s->object_size)) { |
| 6424 | s->flags &= ~DEBUG_METADATA_FLAGS; |
| 6425 | s->offset = 0; |
| 6426 | if (!calculate_sizes(args, s)) |
| 6427 | goto out; |
| 6428 | } |
| 6429 | } |
| 6430 | |
| 6431 | #ifdef system_has_freelist_aba |
| 6432 | if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) { |
| 6433 | /* Enable fast mode */ |
| 6434 | s->flags |= __CMPXCHG_DOUBLE; |
| 6435 | } |
| 6436 | #endif |
| 6437 | |
| 6438 | /* |
| 6439 | * The larger the object size is, the more slabs we want on the partial |
| 6440 | * list to avoid pounding the page allocator excessively. |
| 6441 | */ |
| 6442 | s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2); |
| 6443 | s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial); |
| 6444 | |
| 6445 | set_cpu_partial(s); |
| 6446 | |
| 6447 | #ifdef CONFIG_NUMA |
| 6448 | s->remote_node_defrag_ratio = 1000; |
| 6449 | #endif |
| 6450 | |
| 6451 | /* Initialize the pre-computed randomized freelist if slab is up */ |
| 6452 | if (slab_state >= UP) { |
| 6453 | if (init_cache_random_seq(s)) |
| 6454 | goto out; |
| 6455 | } |
| 6456 | |
| 6457 | if (!init_kmem_cache_nodes(s)) |
| 6458 | goto out; |
| 6459 | |
| 6460 | if (!alloc_kmem_cache_cpus(s)) |
| 6461 | goto out; |
| 6462 | |
| 6463 | err = 0; |
| 6464 | |
	/* slab_mutex is not taken during early boot */
| 6466 | if (slab_state <= UP) |
| 6467 | goto out; |
| 6468 | |
| 6469 | /* |
| 6470 | * Failing to create sysfs files is not critical to SLUB functionality. |
| 6471 | * If it fails, proceed with cache creation without these files. |
| 6472 | */ |
| 6473 | if (sysfs_slab_add(s)) |
| 6474 | pr_err("SLUB: Unable to add cache %s to sysfs\n", s->name); |
| 6475 | |
| 6476 | if (s->flags & SLAB_STORE_USER) |
| 6477 | debugfs_slab_add(s); |
| 6478 | |
| 6479 | out: |
| 6480 | if (err) |
| 6481 | __kmem_cache_release(s); |
| 6482 | return err; |
| 6483 | } |
| 6484 | |
| 6485 | #ifdef SLAB_SUPPORTS_SYSFS |
| 6486 | static int count_inuse(struct slab *slab) |
| 6487 | { |
| 6488 | return slab->inuse; |
| 6489 | } |
| 6490 | |
| 6491 | static int count_total(struct slab *slab) |
| 6492 | { |
| 6493 | return slab->objects; |
| 6494 | } |
| 6495 | #endif |
| 6496 | |
| 6497 | #ifdef CONFIG_SLUB_DEBUG |
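/*
 * Check a single slab for consistency: verify the slab metadata and its
 * freelist, then check that every object carries the poison/red zone
 * pattern expected for its state (free or allocated).
 */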
| 6498 | static void validate_slab(struct kmem_cache *s, struct slab *slab, |
| 6499 | unsigned long *obj_map) |
| 6500 | { |
| 6501 | void *p; |
| 6502 | void *addr = slab_address(slab); |
| 6503 | |
| 6504 | if (!check_slab(s, slab) || !on_freelist(s, slab, NULL)) |
| 6505 | return; |
| 6506 | |
| 6507 | /* Now we know that a valid freelist exists */ |
| 6508 | __fill_map(obj_map, s, slab); |
| 6509 | for_each_object(p, s, addr, slab->objects) { |
| 6510 | u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ? |
| 6511 | SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; |
| 6512 | |
| 6513 | if (!check_object(s, slab, p, val)) |
| 6514 | break; |
| 6515 | } |
| 6516 | } |
| 6517 | |
| 6518 | static int validate_slab_node(struct kmem_cache *s, |
| 6519 | struct kmem_cache_node *n, unsigned long *obj_map) |
| 6520 | { |
| 6521 | unsigned long count = 0; |
| 6522 | struct slab *slab; |
| 6523 | unsigned long flags; |
| 6524 | |
| 6525 | spin_lock_irqsave(&n->list_lock, flags); |
| 6526 | |
| 6527 | list_for_each_entry(slab, &n->partial, slab_list) { |
| 6528 | validate_slab(s, slab, obj_map); |
| 6529 | count++; |
| 6530 | } |
| 6531 | if (count != n->nr_partial) { |
| 6532 | pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", |
| 6533 | s->name, count, n->nr_partial); |
| 6534 | slab_add_kunit_errors(); |
| 6535 | } |
| 6536 | |
| 6537 | if (!(s->flags & SLAB_STORE_USER)) |
| 6538 | goto out; |
| 6539 | |
| 6540 | list_for_each_entry(slab, &n->full, slab_list) { |
| 6541 | validate_slab(s, slab, obj_map); |
| 6542 | count++; |
| 6543 | } |
| 6544 | if (count != node_nr_slabs(n)) { |
| 6545 | pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", |
| 6546 | s->name, count, node_nr_slabs(n)); |
| 6547 | slab_add_kunit_errors(); |
| 6548 | } |
| 6549 | |
| 6550 | out: |
| 6551 | spin_unlock_irqrestore(&n->list_lock, flags); |
| 6552 | return count; |
| 6553 | } |
| 6554 | |
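/*
 * Validate every slab in the cache. Returns the number of slabs checked,
 * or -ENOMEM if the object bitmap could not be allocated.
 */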
| 6555 | long validate_slab_cache(struct kmem_cache *s) |
| 6556 | { |
| 6557 | int node; |
| 6558 | unsigned long count = 0; |
| 6559 | struct kmem_cache_node *n; |
| 6560 | unsigned long *obj_map; |
| 6561 | |
| 6562 | obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); |
| 6563 | if (!obj_map) |
| 6564 | return -ENOMEM; |
| 6565 | |
| 6566 | flush_all(s); |
| 6567 | for_each_kmem_cache_node(s, node, n) |
| 6568 | count += validate_slab_node(s, n, obj_map); |
| 6569 | |
| 6570 | bitmap_free(obj_map); |
| 6571 | |
| 6572 | return count; |
| 6573 | } |
| 6574 | EXPORT_SYMBOL(validate_slab_cache); |
| 6575 | |
| 6576 | #ifdef CONFIG_DEBUG_FS |
| 6577 | /* |
| 6578 | * Generate lists of code addresses where slabcache objects are allocated |
| 6579 | * and freed. |
| 6580 | */ |
| 6581 | |
| 6582 | struct location { |
| 6583 | depot_stack_handle_t handle; |
| 6584 | unsigned long count; |
| 6585 | unsigned long addr; |
| 6586 | unsigned long waste; |
| 6587 | long long sum_time; |
| 6588 | long min_time; |
| 6589 | long max_time; |
| 6590 | long min_pid; |
| 6591 | long max_pid; |
| 6592 | DECLARE_BITMAP(cpus, NR_CPUS); |
| 6593 | nodemask_t nodes; |
| 6594 | }; |
| 6595 | |
| 6596 | struct loc_track { |
| 6597 | unsigned long max; |
| 6598 | unsigned long count; |
| 6599 | struct location *loc; |
| 6600 | loff_t idx; |
| 6601 | }; |
| 6602 | |
| 6603 | static struct dentry *slab_debugfs_root; |
| 6604 | |
| 6605 | static void free_loc_track(struct loc_track *t) |
| 6606 | { |
| 6607 | if (t->max) |
| 6608 | free_pages((unsigned long)t->loc, |
| 6609 | get_order(sizeof(struct location) * t->max)); |
| 6610 | } |
| 6611 | |
| 6612 | static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) |
| 6613 | { |
| 6614 | struct location *l; |
| 6615 | int order; |
| 6616 | |
| 6617 | order = get_order(sizeof(struct location) * max); |
| 6618 | |
| 6619 | l = (void *)__get_free_pages(flags, order); |
| 6620 | if (!l) |
| 6621 | return 0; |
| 6622 | |
| 6623 | if (t->count) { |
| 6624 | memcpy(l, t->loc, sizeof(struct location) * t->count); |
| 6625 | free_loc_track(t); |
| 6626 | } |
| 6627 | t->max = max; |
| 6628 | t->loc = l; |
| 6629 | return 1; |
| 6630 | } |
| 6631 | |
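/*
 * Record one allocation/free event in the location table. The table is
 * kept sorted by (address, stack handle, waste); a binary search either
 * finds an existing entry to update or the position where a new entry
 * is inserted. Returns 0 if the table could not be grown.
 */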
| 6632 | static int add_location(struct loc_track *t, struct kmem_cache *s, |
| 6633 | const struct track *track, |
| 6634 | unsigned int orig_size) |
| 6635 | { |
| 6636 | long start, end, pos; |
| 6637 | struct location *l; |
| 6638 | unsigned long caddr, chandle, cwaste; |
| 6639 | unsigned long age = jiffies - track->when; |
| 6640 | depot_stack_handle_t handle = 0; |
| 6641 | unsigned int waste = s->object_size - orig_size; |
| 6642 | |
| 6643 | #ifdef CONFIG_STACKDEPOT |
| 6644 | handle = READ_ONCE(track->handle); |
| 6645 | #endif |
| 6646 | start = -1; |
| 6647 | end = t->count; |
| 6648 | |
| 6649 | for ( ; ; ) { |
| 6650 | pos = start + (end - start + 1) / 2; |
| 6651 | |
| 6652 | /* |
		 * There is nothing at "end". If we end up there
		 * we need to insert the new element before "end".
| 6655 | */ |
| 6656 | if (pos == end) |
| 6657 | break; |
| 6658 | |
| 6659 | l = &t->loc[pos]; |
| 6660 | caddr = l->addr; |
| 6661 | chandle = l->handle; |
| 6662 | cwaste = l->waste; |
| 6663 | if ((track->addr == caddr) && (handle == chandle) && |
| 6664 | (waste == cwaste)) { |
| 6665 | |
| 6666 | l->count++; |
| 6667 | if (track->when) { |
| 6668 | l->sum_time += age; |
| 6669 | if (age < l->min_time) |
| 6670 | l->min_time = age; |
| 6671 | if (age > l->max_time) |
| 6672 | l->max_time = age; |
| 6673 | |
| 6674 | if (track->pid < l->min_pid) |
| 6675 | l->min_pid = track->pid; |
| 6676 | if (track->pid > l->max_pid) |
| 6677 | l->max_pid = track->pid; |
| 6678 | |
| 6679 | cpumask_set_cpu(track->cpu, |
| 6680 | to_cpumask(l->cpus)); |
| 6681 | } |
| 6682 | node_set(page_to_nid(virt_to_page(track)), l->nodes); |
| 6683 | return 1; |
| 6684 | } |
| 6685 | |
| 6686 | if (track->addr < caddr) |
| 6687 | end = pos; |
| 6688 | else if (track->addr == caddr && handle < chandle) |
| 6689 | end = pos; |
| 6690 | else if (track->addr == caddr && handle == chandle && |
| 6691 | waste < cwaste) |
| 6692 | end = pos; |
| 6693 | else |
| 6694 | start = pos; |
| 6695 | } |
| 6696 | |
| 6697 | /* |
| 6698 | * Not found. Insert new tracking element. |
| 6699 | */ |
| 6700 | if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) |
| 6701 | return 0; |
| 6702 | |
| 6703 | l = t->loc + pos; |
| 6704 | if (pos < t->count) |
| 6705 | memmove(l + 1, l, |
| 6706 | (t->count - pos) * sizeof(struct location)); |
| 6707 | t->count++; |
| 6708 | l->count = 1; |
| 6709 | l->addr = track->addr; |
| 6710 | l->sum_time = age; |
| 6711 | l->min_time = age; |
| 6712 | l->max_time = age; |
| 6713 | l->min_pid = track->pid; |
| 6714 | l->max_pid = track->pid; |
| 6715 | l->handle = handle; |
| 6716 | l->waste = waste; |
| 6717 | cpumask_clear(to_cpumask(l->cpus)); |
| 6718 | cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); |
| 6719 | nodes_clear(l->nodes); |
| 6720 | node_set(page_to_nid(virt_to_page(track)), l->nodes); |
| 6721 | return 1; |
| 6722 | } |
| 6723 | |
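/*
 * Walk all objects in a slab and record the stored alloc or free track
 * of every allocated object in the location table.
 */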
| 6724 | static void process_slab(struct loc_track *t, struct kmem_cache *s, |
| 6725 | struct slab *slab, enum track_item alloc, |
| 6726 | unsigned long *obj_map) |
| 6727 | { |
| 6728 | void *addr = slab_address(slab); |
| 6729 | bool is_alloc = (alloc == TRACK_ALLOC); |
| 6730 | void *p; |
| 6731 | |
| 6732 | __fill_map(obj_map, s, slab); |
| 6733 | |
| 6734 | for_each_object(p, s, addr, slab->objects) |
| 6735 | if (!test_bit(__obj_to_index(s, addr, p), obj_map)) |
| 6736 | add_location(t, s, get_track(s, p, alloc), |
| 6737 | is_alloc ? get_orig_size(s, p) : |
| 6738 | s->object_size); |
| 6739 | } |
| 6740 | #endif /* CONFIG_DEBUG_FS */ |
| 6741 | #endif /* CONFIG_SLUB_DEBUG */ |
| 6742 | |
| 6743 | #ifdef SLAB_SUPPORTS_SYSFS |
| 6744 | enum slab_stat_type { |
| 6745 | SL_ALL, /* All slabs */ |
| 6746 | SL_PARTIAL, /* Only partially allocated slabs */ |
| 6747 | SL_CPU, /* Only slabs used for cpu caches */ |
| 6748 | SL_OBJECTS, /* Determine allocated objects not slabs */ |
| 6749 | SL_TOTAL /* Determine object capacity not slabs */ |
| 6750 | }; |
| 6751 | |
| 6752 | #define SO_ALL (1 << SL_ALL) |
| 6753 | #define SO_PARTIAL (1 << SL_PARTIAL) |
| 6754 | #define SO_CPU (1 << SL_CPU) |
| 6755 | #define SO_OBJECTS (1 << SL_OBJECTS) |
| 6756 | #define SO_TOTAL (1 << SL_TOTAL) |
| 6757 | |
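/*
 * Show slab or object counts selected by @flags, as a total followed by
 * per-node "N<id>=<count>" pairs.
 */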
| 6758 | static ssize_t show_slab_objects(struct kmem_cache *s, |
| 6759 | char *buf, unsigned long flags) |
| 6760 | { |
| 6761 | unsigned long total = 0; |
| 6762 | int node; |
| 6763 | int x; |
| 6764 | unsigned long *nodes; |
| 6765 | int len = 0; |
| 6766 | |
| 6767 | nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); |
| 6768 | if (!nodes) |
| 6769 | return -ENOMEM; |
| 6770 | |
| 6771 | if (flags & SO_CPU) { |
| 6772 | int cpu; |
| 6773 | |
| 6774 | for_each_possible_cpu(cpu) { |
| 6775 | struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, |
| 6776 | cpu); |
| 6777 | int node; |
| 6778 | struct slab *slab; |
| 6779 | |
| 6780 | slab = READ_ONCE(c->slab); |
| 6781 | if (!slab) |
| 6782 | continue; |
| 6783 | |
| 6784 | node = slab_nid(slab); |
| 6785 | if (flags & SO_TOTAL) |
| 6786 | x = slab->objects; |
| 6787 | else if (flags & SO_OBJECTS) |
| 6788 | x = slab->inuse; |
| 6789 | else |
| 6790 | x = 1; |
| 6791 | |
| 6792 | total += x; |
| 6793 | nodes[node] += x; |
| 6794 | |
| 6795 | #ifdef CONFIG_SLUB_CPU_PARTIAL |
| 6796 | slab = slub_percpu_partial_read_once(c); |
| 6797 | if (slab) { |
| 6798 | node = slab_nid(slab); |
| 6799 | if (flags & SO_TOTAL) |
| 6800 | WARN_ON_ONCE(1); |
| 6801 | else if (flags & SO_OBJECTS) |
| 6802 | WARN_ON_ONCE(1); |
| 6803 | else |
| 6804 | x = data_race(slab->slabs); |
| 6805 | total += x; |
| 6806 | nodes[node] += x; |
| 6807 | } |
| 6808 | #endif |
| 6809 | } |
| 6810 | } |
| 6811 | |
| 6812 | /* |
| 6813 | * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex" |
| 6814 | * already held which will conflict with an existing lock order: |
| 6815 | * |
| 6816 | * mem_hotplug_lock->slab_mutex->kernfs_mutex |
| 6817 | * |
| 6818 | * We don't really need mem_hotplug_lock (to hold off |
| 6819 | * slab_mem_going_offline_callback) here because slab's memory hot |
| 6820 | * unplug code doesn't destroy the kmem_cache->node[] data. |
| 6821 | */ |
| 6822 | |
| 6823 | #ifdef CONFIG_SLUB_DEBUG |
| 6824 | if (flags & SO_ALL) { |
| 6825 | struct kmem_cache_node *n; |
| 6826 | |
| 6827 | for_each_kmem_cache_node(s, node, n) { |
| 6828 | |
| 6829 | if (flags & SO_TOTAL) |
| 6830 | x = node_nr_objs(n); |
| 6831 | else if (flags & SO_OBJECTS) |
| 6832 | x = node_nr_objs(n) - count_partial(n, count_free); |
| 6833 | else |
| 6834 | x = node_nr_slabs(n); |
| 6835 | total += x; |
| 6836 | nodes[node] += x; |
| 6837 | } |
| 6838 | |
| 6839 | } else |
| 6840 | #endif |
| 6841 | if (flags & SO_PARTIAL) { |
| 6842 | struct kmem_cache_node *n; |
| 6843 | |
| 6844 | for_each_kmem_cache_node(s, node, n) { |
| 6845 | if (flags & SO_TOTAL) |
| 6846 | x = count_partial(n, count_total); |
| 6847 | else if (flags & SO_OBJECTS) |
| 6848 | x = count_partial(n, count_inuse); |
| 6849 | else |
| 6850 | x = n->nr_partial; |
| 6851 | total += x; |
| 6852 | nodes[node] += x; |
| 6853 | } |
| 6854 | } |
| 6855 | |
| 6856 | len += sysfs_emit_at(buf, len, "%lu", total); |
| 6857 | #ifdef CONFIG_NUMA |
| 6858 | for (node = 0; node < nr_node_ids; node++) { |
| 6859 | if (nodes[node]) |
| 6860 | len += sysfs_emit_at(buf, len, " N%d=%lu", |
| 6861 | node, nodes[node]); |
| 6862 | } |
| 6863 | #endif |
| 6864 | len += sysfs_emit_at(buf, len, "\n"); |
| 6865 | kfree(nodes); |
| 6866 | |
| 6867 | return len; |
| 6868 | } |
| 6869 | |
| 6870 | #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) |
| 6871 | #define to_slab(n) container_of(n, struct kmem_cache, kobj) |
| 6872 | |
| 6873 | struct slab_attribute { |
| 6874 | struct attribute attr; |
| 6875 | ssize_t (*show)(struct kmem_cache *s, char *buf); |
| 6876 | ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); |
| 6877 | }; |
| 6878 | |
| 6879 | #define SLAB_ATTR_RO(_name) \ |
| 6880 | static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400) |
| 6881 | |
| 6882 | #define SLAB_ATTR(_name) \ |
| 6883 | static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600) |
| 6884 | |
| 6885 | static ssize_t slab_size_show(struct kmem_cache *s, char *buf) |
| 6886 | { |
| 6887 | return sysfs_emit(buf, "%u\n", s->size); |
| 6888 | } |
| 6889 | SLAB_ATTR_RO(slab_size); |
| 6890 | |
| 6891 | static ssize_t align_show(struct kmem_cache *s, char *buf) |
| 6892 | { |
| 6893 | return sysfs_emit(buf, "%u\n", s->align); |
| 6894 | } |
| 6895 | SLAB_ATTR_RO(align); |
| 6896 | |
| 6897 | static ssize_t object_size_show(struct kmem_cache *s, char *buf) |
| 6898 | { |
| 6899 | return sysfs_emit(buf, "%u\n", s->object_size); |
| 6900 | } |
| 6901 | SLAB_ATTR_RO(object_size); |
| 6902 | |
| 6903 | static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) |
| 6904 | { |
| 6905 | return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); |
| 6906 | } |
| 6907 | SLAB_ATTR_RO(objs_per_slab); |
| 6908 | |
| 6909 | static ssize_t order_show(struct kmem_cache *s, char *buf) |
| 6910 | { |
| 6911 | return sysfs_emit(buf, "%u\n", oo_order(s->oo)); |
| 6912 | } |
| 6913 | SLAB_ATTR_RO(order); |
| 6914 | |
| 6915 | static ssize_t min_partial_show(struct kmem_cache *s, char *buf) |
| 6916 | { |
| 6917 | return sysfs_emit(buf, "%lu\n", s->min_partial); |
| 6918 | } |
| 6919 | |
| 6920 | static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, |
| 6921 | size_t length) |
| 6922 | { |
| 6923 | unsigned long min; |
| 6924 | int err; |
| 6925 | |
| 6926 | err = kstrtoul(buf, 10, &min); |
| 6927 | if (err) |
| 6928 | return err; |
| 6929 | |
| 6930 | s->min_partial = min; |
| 6931 | return length; |
| 6932 | } |
| 6933 | SLAB_ATTR(min_partial); |
| 6934 | |
| 6935 | static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) |
| 6936 | { |
| 6937 | unsigned int nr_partial = 0; |
| 6938 | #ifdef CONFIG_SLUB_CPU_PARTIAL |
| 6939 | nr_partial = s->cpu_partial; |
| 6940 | #endif |
| 6941 | |
| 6942 | return sysfs_emit(buf, "%u\n", nr_partial); |
| 6943 | } |
| 6944 | |
| 6945 | static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, |
| 6946 | size_t length) |
| 6947 | { |
| 6948 | unsigned int objects; |
| 6949 | int err; |
| 6950 | |
| 6951 | err = kstrtouint(buf, 10, &objects); |
| 6952 | if (err) |
| 6953 | return err; |
| 6954 | if (objects && !kmem_cache_has_cpu_partial(s)) |
| 6955 | return -EINVAL; |
| 6956 | |
| 6957 | slub_set_cpu_partial(s, objects); |
| 6958 | flush_all(s); |
| 6959 | return length; |
| 6960 | } |
| 6961 | SLAB_ATTR(cpu_partial); |
| 6962 | |
| 6963 | static ssize_t ctor_show(struct kmem_cache *s, char *buf) |
| 6964 | { |
| 6965 | if (!s->ctor) |
| 6966 | return 0; |
| 6967 | return sysfs_emit(buf, "%pS\n", s->ctor); |
| 6968 | } |
| 6969 | SLAB_ATTR_RO(ctor); |
| 6970 | |
| 6971 | static ssize_t aliases_show(struct kmem_cache *s, char *buf) |
| 6972 | { |
| 6973 | return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); |
| 6974 | } |
| 6975 | SLAB_ATTR_RO(aliases); |
| 6976 | |
| 6977 | static ssize_t partial_show(struct kmem_cache *s, char *buf) |
| 6978 | { |
| 6979 | return show_slab_objects(s, buf, SO_PARTIAL); |
| 6980 | } |
| 6981 | SLAB_ATTR_RO(partial); |
| 6982 | |
| 6983 | static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) |
| 6984 | { |
| 6985 | return show_slab_objects(s, buf, SO_CPU); |
| 6986 | } |
| 6987 | SLAB_ATTR_RO(cpu_slabs); |
| 6988 | |
| 6989 | static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) |
| 6990 | { |
| 6991 | return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); |
| 6992 | } |
| 6993 | SLAB_ATTR_RO(objects_partial); |
| 6994 | |
| 6995 | static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) |
| 6996 | { |
| 6997 | int objects = 0; |
| 6998 | int slabs = 0; |
| 6999 | int cpu __maybe_unused; |
| 7000 | int len = 0; |
| 7001 | |
| 7002 | #ifdef CONFIG_SLUB_CPU_PARTIAL |
| 7003 | for_each_online_cpu(cpu) { |
| 7004 | struct slab *slab; |
| 7005 | |
| 7006 | slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); |
| 7007 | |
| 7008 | if (slab) |
| 7009 | slabs += data_race(slab->slabs); |
| 7010 | } |
| 7011 | #endif |
| 7012 | |
| 7013 | /* Approximate half-full slabs, see slub_set_cpu_partial() */ |
| 7014 | objects = (slabs * oo_objects(s->oo)) / 2; |
| 7015 | len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs); |
| 7016 | |
| 7017 | #ifdef CONFIG_SLUB_CPU_PARTIAL |
| 7018 | for_each_online_cpu(cpu) { |
| 7019 | struct slab *slab; |
| 7020 | |
| 7021 | slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); |
| 7022 | if (slab) { |
| 7023 | slabs = data_race(slab->slabs); |
| 7024 | objects = (slabs * oo_objects(s->oo)) / 2; |
| 7025 | len += sysfs_emit_at(buf, len, " C%d=%d(%d)", |
| 7026 | cpu, objects, slabs); |
| 7027 | } |
| 7028 | } |
| 7029 | #endif |
| 7030 | len += sysfs_emit_at(buf, len, "\n"); |
| 7031 | |
| 7032 | return len; |
| 7033 | } |
| 7034 | SLAB_ATTR_RO(slabs_cpu_partial); |
| 7035 | |
| 7036 | static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) |
| 7037 | { |
| 7038 | return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); |
| 7039 | } |
| 7040 | SLAB_ATTR_RO(reclaim_account); |
| 7041 | |
| 7042 | static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) |
| 7043 | { |
| 7044 | return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); |
| 7045 | } |
| 7046 | SLAB_ATTR_RO(hwcache_align); |
| 7047 | |
| 7048 | #ifdef CONFIG_ZONE_DMA |
| 7049 | static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) |
| 7050 | { |
| 7051 | return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); |
| 7052 | } |
| 7053 | SLAB_ATTR_RO(cache_dma); |
| 7054 | #endif |
| 7055 | |
| 7056 | #ifdef CONFIG_HARDENED_USERCOPY |
| 7057 | static ssize_t usersize_show(struct kmem_cache *s, char *buf) |
| 7058 | { |
| 7059 | return sysfs_emit(buf, "%u\n", s->usersize); |
| 7060 | } |
| 7061 | SLAB_ATTR_RO(usersize); |
| 7062 | #endif |
| 7063 | |
| 7064 | static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) |
| 7065 | { |
| 7066 | return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); |
| 7067 | } |
| 7068 | SLAB_ATTR_RO(destroy_by_rcu); |
| 7069 | |
| 7070 | #ifdef CONFIG_SLUB_DEBUG |
| 7071 | static ssize_t slabs_show(struct kmem_cache *s, char *buf) |
| 7072 | { |
| 7073 | return show_slab_objects(s, buf, SO_ALL); |
| 7074 | } |
| 7075 | SLAB_ATTR_RO(slabs); |
| 7076 | |
| 7077 | static ssize_t total_objects_show(struct kmem_cache *s, char *buf) |
| 7078 | { |
| 7079 | return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); |
| 7080 | } |
| 7081 | SLAB_ATTR_RO(total_objects); |
| 7082 | |
| 7083 | static ssize_t objects_show(struct kmem_cache *s, char *buf) |
| 7084 | { |
| 7085 | return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); |
| 7086 | } |
| 7087 | SLAB_ATTR_RO(objects); |
| 7088 | |
| 7089 | static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) |
| 7090 | { |
| 7091 | return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); |
| 7092 | } |
| 7093 | SLAB_ATTR_RO(sanity_checks); |
| 7094 | |
| 7095 | static ssize_t trace_show(struct kmem_cache *s, char *buf) |
| 7096 | { |
| 7097 | return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); |
| 7098 | } |
| 7099 | SLAB_ATTR_RO(trace); |
| 7100 | |
| 7101 | static ssize_t red_zone_show(struct kmem_cache *s, char *buf) |
| 7102 | { |
| 7103 | return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); |
| 7104 | } |
| 7105 | |
| 7106 | SLAB_ATTR_RO(red_zone); |
| 7107 | |
| 7108 | static ssize_t poison_show(struct kmem_cache *s, char *buf) |
| 7109 | { |
| 7110 | return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); |
| 7111 | } |
| 7112 | |
| 7113 | SLAB_ATTR_RO(poison); |
| 7114 | |
| 7115 | static ssize_t store_user_show(struct kmem_cache *s, char *buf) |
| 7116 | { |
| 7117 | return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); |
| 7118 | } |
| 7119 | |
| 7120 | SLAB_ATTR_RO(store_user); |
| 7121 | |
| 7122 | static ssize_t validate_show(struct kmem_cache *s, char *buf) |
| 7123 | { |
| 7124 | return 0; |
| 7125 | } |
| 7126 | |
| 7127 | static ssize_t validate_store(struct kmem_cache *s, |
| 7128 | const char *buf, size_t length) |
| 7129 | { |
| 7130 | int ret = -EINVAL; |
| 7131 | |
| 7132 | if (buf[0] == '1' && kmem_cache_debug(s)) { |
| 7133 | ret = validate_slab_cache(s); |
| 7134 | if (ret >= 0) |
| 7135 | ret = length; |
| 7136 | } |
| 7137 | return ret; |
| 7138 | } |
| 7139 | SLAB_ATTR(validate); |
| 7140 | |
| 7141 | #endif /* CONFIG_SLUB_DEBUG */ |
| 7142 | |
| 7143 | #ifdef CONFIG_FAILSLAB |
| 7144 | static ssize_t failslab_show(struct kmem_cache *s, char *buf) |
| 7145 | { |
| 7146 | return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); |
| 7147 | } |
| 7148 | |
| 7149 | static ssize_t failslab_store(struct kmem_cache *s, const char *buf, |
| 7150 | size_t length) |
| 7151 | { |
| 7152 | if (s->refcount > 1) |
| 7153 | return -EINVAL; |
| 7154 | |
| 7155 | if (buf[0] == '1') |
| 7156 | WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB); |
| 7157 | else |
| 7158 | WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB); |
| 7159 | |
| 7160 | return length; |
| 7161 | } |
| 7162 | SLAB_ATTR(failslab); |
| 7163 | #endif |
| 7164 | |
| 7165 | static ssize_t shrink_show(struct kmem_cache *s, char *buf) |
| 7166 | { |
| 7167 | return 0; |
| 7168 | } |
| 7169 | |
| 7170 | static ssize_t shrink_store(struct kmem_cache *s, |
| 7171 | const char *buf, size_t length) |
| 7172 | { |
| 7173 | if (buf[0] == '1') |
| 7174 | kmem_cache_shrink(s); |
| 7175 | else |
| 7176 | return -EINVAL; |
| 7177 | return length; |
| 7178 | } |
| 7179 | SLAB_ATTR(shrink); |
| 7180 | |
| 7181 | #ifdef CONFIG_NUMA |
| 7182 | static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) |
| 7183 | { |
| 7184 | return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); |
| 7185 | } |
| 7186 | |
| 7187 | static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, |
| 7188 | const char *buf, size_t length) |
| 7189 | { |
| 7190 | unsigned int ratio; |
| 7191 | int err; |
| 7192 | |
| 7193 | err = kstrtouint(buf, 10, &ratio); |
| 7194 | if (err) |
| 7195 | return err; |
| 7196 | if (ratio > 100) |
| 7197 | return -ERANGE; |
| 7198 | |
| 7199 | s->remote_node_defrag_ratio = ratio * 10; |
| 7200 | |
| 7201 | return length; |
| 7202 | } |
| 7203 | SLAB_ATTR(remote_node_defrag_ratio); |
| 7204 | #endif |
| 7205 | |
| 7206 | #ifdef CONFIG_SLUB_STATS |
| 7207 | static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) |
| 7208 | { |
| 7209 | unsigned long sum = 0; |
| 7210 | int cpu; |
| 7211 | int len = 0; |
| 7212 | int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); |
| 7213 | |
| 7214 | if (!data) |
| 7215 | return -ENOMEM; |
| 7216 | |
| 7217 | for_each_online_cpu(cpu) { |
| 7218 | unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; |
| 7219 | |
| 7220 | data[cpu] = x; |
| 7221 | sum += x; |
| 7222 | } |
| 7223 | |
| 7224 | len += sysfs_emit_at(buf, len, "%lu", sum); |
| 7225 | |
| 7226 | #ifdef CONFIG_SMP |
| 7227 | for_each_online_cpu(cpu) { |
| 7228 | if (data[cpu]) |
| 7229 | len += sysfs_emit_at(buf, len, " C%d=%u", |
| 7230 | cpu, data[cpu]); |
| 7231 | } |
| 7232 | #endif |
| 7233 | kfree(data); |
| 7234 | len += sysfs_emit_at(buf, len, "\n"); |
| 7235 | |
| 7236 | return len; |
| 7237 | } |
| 7238 | |
| 7239 | static void clear_stat(struct kmem_cache *s, enum stat_item si) |
| 7240 | { |
| 7241 | int cpu; |
| 7242 | |
| 7243 | for_each_online_cpu(cpu) |
| 7244 | per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; |
| 7245 | } |
| 7246 | |
| 7247 | #define STAT_ATTR(si, text) \ |
| 7248 | static ssize_t text##_show(struct kmem_cache *s, char *buf) \ |
| 7249 | { \ |
| 7250 | return show_stat(s, buf, si); \ |
| 7251 | } \ |
| 7252 | static ssize_t text##_store(struct kmem_cache *s, \ |
| 7253 | const char *buf, size_t length) \ |
| 7254 | { \ |
| 7255 | if (buf[0] != '0') \ |
| 7256 | return -EINVAL; \ |
| 7257 | clear_stat(s, si); \ |
| 7258 | return length; \ |
| 7259 | } \ |
| 7260 | SLAB_ATTR(text); \ |
| 7261 | |
| 7262 | STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); |
| 7263 | STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); |
| 7264 | STAT_ATTR(FREE_FASTPATH, free_fastpath); |
| 7265 | STAT_ATTR(FREE_SLOWPATH, free_slowpath); |
| 7266 | STAT_ATTR(FREE_FROZEN, free_frozen); |
| 7267 | STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); |
| 7268 | STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); |
| 7269 | STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); |
| 7270 | STAT_ATTR(ALLOC_SLAB, alloc_slab); |
| 7271 | STAT_ATTR(ALLOC_REFILL, alloc_refill); |
| 7272 | STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); |
| 7273 | STAT_ATTR(FREE_SLAB, free_slab); |
| 7274 | STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); |
| 7275 | STAT_ATTR(DEACTIVATE_FULL, deactivate_full); |
| 7276 | STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); |
| 7277 | STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); |
| 7278 | STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); |
| 7279 | STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); |
| 7280 | STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); |
| 7281 | STAT_ATTR(ORDER_FALLBACK, order_fallback); |
| 7282 | STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); |
| 7283 | STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); |
| 7284 | STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); |
| 7285 | STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); |
| 7286 | STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); |
| 7287 | STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); |
| 7288 | #endif /* CONFIG_SLUB_STATS */ |
| 7289 | |
| 7290 | #ifdef CONFIG_KFENCE |
| 7291 | static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf) |
| 7292 | { |
| 7293 | return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE)); |
| 7294 | } |
| 7295 | |
| 7296 | static ssize_t skip_kfence_store(struct kmem_cache *s, |
| 7297 | const char *buf, size_t length) |
| 7298 | { |
| 7299 | int ret = length; |
| 7300 | |
| 7301 | if (buf[0] == '0') |
| 7302 | s->flags &= ~SLAB_SKIP_KFENCE; |
| 7303 | else if (buf[0] == '1') |
| 7304 | s->flags |= SLAB_SKIP_KFENCE; |
| 7305 | else |
| 7306 | ret = -EINVAL; |
| 7307 | |
| 7308 | return ret; |
| 7309 | } |
| 7310 | SLAB_ATTR(skip_kfence); |
| 7311 | #endif |
| 7312 | |
| 7313 | static struct attribute *slab_attrs[] = { |
| 7314 | &slab_size_attr.attr, |
| 7315 | &object_size_attr.attr, |
| 7316 | &objs_per_slab_attr.attr, |
| 7317 | &order_attr.attr, |
| 7318 | &min_partial_attr.attr, |
| 7319 | &cpu_partial_attr.attr, |
| 7320 | &objects_partial_attr.attr, |
| 7321 | &partial_attr.attr, |
| 7322 | &cpu_slabs_attr.attr, |
| 7323 | &ctor_attr.attr, |
| 7324 | &aliases_attr.attr, |
| 7325 | &align_attr.attr, |
| 7326 | &hwcache_align_attr.attr, |
| 7327 | &reclaim_account_attr.attr, |
| 7328 | &destroy_by_rcu_attr.attr, |
| 7329 | &shrink_attr.attr, |
| 7330 | &slabs_cpu_partial_attr.attr, |
| 7331 | #ifdef CONFIG_SLUB_DEBUG |
| 7332 | &total_objects_attr.attr, |
| 7333 | &objects_attr.attr, |
| 7334 | &slabs_attr.attr, |
| 7335 | &sanity_checks_attr.attr, |
| 7336 | &trace_attr.attr, |
| 7337 | &red_zone_attr.attr, |
| 7338 | &poison_attr.attr, |
| 7339 | &store_user_attr.attr, |
| 7340 | &validate_attr.attr, |
| 7341 | #endif |
| 7342 | #ifdef CONFIG_ZONE_DMA |
| 7343 | &cache_dma_attr.attr, |
| 7344 | #endif |
| 7345 | #ifdef CONFIG_NUMA |
| 7346 | &remote_node_defrag_ratio_attr.attr, |
| 7347 | #endif |
| 7348 | #ifdef CONFIG_SLUB_STATS |
| 7349 | &alloc_fastpath_attr.attr, |
| 7350 | &alloc_slowpath_attr.attr, |
| 7351 | &free_fastpath_attr.attr, |
| 7352 | &free_slowpath_attr.attr, |
| 7353 | &free_frozen_attr.attr, |
| 7354 | &free_add_partial_attr.attr, |
| 7355 | &free_remove_partial_attr.attr, |
| 7356 | &alloc_from_partial_attr.attr, |
| 7357 | &alloc_slab_attr.attr, |
| 7358 | &alloc_refill_attr.attr, |
| 7359 | &alloc_node_mismatch_attr.attr, |
| 7360 | &free_slab_attr.attr, |
| 7361 | &cpuslab_flush_attr.attr, |
| 7362 | &deactivate_full_attr.attr, |
| 7363 | &deactivate_empty_attr.attr, |
| 7364 | &deactivate_to_head_attr.attr, |
| 7365 | &deactivate_to_tail_attr.attr, |
| 7366 | &deactivate_remote_frees_attr.attr, |
| 7367 | &deactivate_bypass_attr.attr, |
| 7368 | &order_fallback_attr.attr, |
| 7369 | &cmpxchg_double_fail_attr.attr, |
| 7370 | &cmpxchg_double_cpu_fail_attr.attr, |
| 7371 | &cpu_partial_alloc_attr.attr, |
| 7372 | &cpu_partial_free_attr.attr, |
| 7373 | &cpu_partial_node_attr.attr, |
| 7374 | &cpu_partial_drain_attr.attr, |
| 7375 | #endif |
| 7376 | #ifdef CONFIG_FAILSLAB |
| 7377 | &failslab_attr.attr, |
| 7378 | #endif |
| 7379 | #ifdef CONFIG_HARDENED_USERCOPY |
| 7380 | &usersize_attr.attr, |
| 7381 | #endif |
| 7382 | #ifdef CONFIG_KFENCE |
| 7383 | &skip_kfence_attr.attr, |
| 7384 | #endif |
| 7385 | |
| 7386 | NULL |
| 7387 | }; |
| 7388 | |
| 7389 | static const struct attribute_group slab_attr_group = { |
| 7390 | .attrs = slab_attrs, |
| 7391 | }; |
| 7392 | |
| 7393 | static ssize_t slab_attr_show(struct kobject *kobj, |
| 7394 | struct attribute *attr, |
| 7395 | char *buf) |
| 7396 | { |
| 7397 | struct slab_attribute *attribute; |
| 7398 | struct kmem_cache *s; |
| 7399 | |
| 7400 | attribute = to_slab_attr(attr); |
| 7401 | s = to_slab(kobj); |
| 7402 | |
| 7403 | if (!attribute->show) |
| 7404 | return -EIO; |
| 7405 | |
| 7406 | return attribute->show(s, buf); |
| 7407 | } |
| 7408 | |
| 7409 | static ssize_t slab_attr_store(struct kobject *kobj, |
| 7410 | struct attribute *attr, |
| 7411 | const char *buf, size_t len) |
| 7412 | { |
| 7413 | struct slab_attribute *attribute; |
| 7414 | struct kmem_cache *s; |
| 7415 | |
| 7416 | attribute = to_slab_attr(attr); |
| 7417 | s = to_slab(kobj); |
| 7418 | |
| 7419 | if (!attribute->store) |
| 7420 | return -EIO; |
| 7421 | |
| 7422 | return attribute->store(s, buf, len); |
| 7423 | } |
| 7424 | |
| 7425 | static void kmem_cache_release(struct kobject *k) |
| 7426 | { |
| 7427 | slab_kmem_cache_release(to_slab(k)); |
| 7428 | } |
| 7429 | |
| 7430 | static const struct sysfs_ops slab_sysfs_ops = { |
| 7431 | .show = slab_attr_show, |
| 7432 | .store = slab_attr_store, |
| 7433 | }; |
| 7434 | |
| 7435 | static const struct kobj_type slab_ktype = { |
| 7436 | .sysfs_ops = &slab_sysfs_ops, |
| 7437 | .release = kmem_cache_release, |
| 7438 | }; |
| 7439 | |
| 7440 | static struct kset *slab_kset; |
| 7441 | |
| 7442 | static inline struct kset *cache_kset(struct kmem_cache *s) |
| 7443 | { |
| 7444 | return slab_kset; |
| 7445 | } |
| 7446 | |
| 7447 | #define ID_STR_LENGTH 32 |
| 7448 | |
| 7449 | /* Create a unique string id for a slab cache: |
| 7450 | * |
 * Format: [flags-]size
 *
 * e.g. a 192-byte cache with SLAB_CACHE_DMA and SLAB_ACCOUNT set gets
 * the id ":dA-0000192".
 */
| 7453 | static char *create_unique_id(struct kmem_cache *s) |
| 7454 | { |
| 7455 | char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); |
| 7456 | char *p = name; |
| 7457 | |
| 7458 | if (!name) |
| 7459 | return ERR_PTR(-ENOMEM); |
| 7460 | |
| 7461 | *p++ = ':'; |
| 7462 | /* |
	 * First the flags affecting slab cache operations. We only get
	 * here for aliasable slabs, so we do not need to support too
	 * many flags. The flags here must cover all flags that are
	 * matched during merging to guarantee that the id is unique.
| 7468 | */ |
| 7469 | if (s->flags & SLAB_CACHE_DMA) |
| 7470 | *p++ = 'd'; |
| 7471 | if (s->flags & SLAB_CACHE_DMA32) |
| 7472 | *p++ = 'D'; |
| 7473 | if (s->flags & SLAB_RECLAIM_ACCOUNT) |
| 7474 | *p++ = 'a'; |
| 7475 | if (s->flags & SLAB_CONSISTENCY_CHECKS) |
| 7476 | *p++ = 'F'; |
| 7477 | if (s->flags & SLAB_ACCOUNT) |
| 7478 | *p++ = 'A'; |
| 7479 | if (p != name + 1) |
| 7480 | *p++ = '-'; |
| 7481 | p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size); |
| 7482 | |
| 7483 | if (WARN_ON(p > name + ID_STR_LENGTH - 1)) { |
| 7484 | kfree(name); |
| 7485 | return ERR_PTR(-EINVAL); |
| 7486 | } |
| 7487 | kmsan_unpoison_memory(name, p - name); |
| 7488 | return name; |
| 7489 | } |
| 7490 | |
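/*
 * Register a cache in /sys/kernel/slab. Mergeable caches are added under
 * a unique id with the cache name as a symlink alias; unmergeable caches
 * use their name directly.
 */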
| 7491 | static int sysfs_slab_add(struct kmem_cache *s) |
| 7492 | { |
| 7493 | int err; |
| 7494 | const char *name; |
| 7495 | struct kset *kset = cache_kset(s); |
| 7496 | int unmergeable = slab_unmergeable(s); |
| 7497 | |
| 7498 | if (!unmergeable && disable_higher_order_debug && |
| 7499 | (slub_debug & DEBUG_METADATA_FLAGS)) |
| 7500 | unmergeable = 1; |
| 7501 | |
| 7502 | if (unmergeable) { |
| 7503 | /* |
		 * The slab cache can never be merged, so we can use its name
		 * directly. This is typically the case in debug situations,
		 * where it also lets us catch duplicate names easily.
| 7507 | */ |
| 7508 | sysfs_remove_link(&slab_kset->kobj, s->name); |
| 7509 | name = s->name; |
| 7510 | } else { |
| 7511 | /* |
| 7512 | * Create a unique name for the slab as a target |
| 7513 | * for the symlinks. |
| 7514 | */ |
| 7515 | name = create_unique_id(s); |
| 7516 | if (IS_ERR(name)) |
| 7517 | return PTR_ERR(name); |
| 7518 | } |
| 7519 | |
| 7520 | s->kobj.kset = kset; |
| 7521 | err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); |
| 7522 | if (err) |
| 7523 | goto out; |
| 7524 | |
| 7525 | err = sysfs_create_group(&s->kobj, &slab_attr_group); |
| 7526 | if (err) |
| 7527 | goto out_del_kobj; |
| 7528 | |
| 7529 | if (!unmergeable) { |
| 7530 | /* Setup first alias */ |
| 7531 | sysfs_slab_alias(s, s->name); |
| 7532 | } |
| 7533 | out: |
| 7534 | if (!unmergeable) |
| 7535 | kfree(name); |
| 7536 | return err; |
| 7537 | out_del_kobj: |
| 7538 | kobject_del(&s->kobj); |
| 7539 | goto out; |
| 7540 | } |
| 7541 | |
| 7542 | void sysfs_slab_unlink(struct kmem_cache *s) |
| 7543 | { |
| 7544 | if (s->kobj.state_in_sysfs) |
| 7545 | kobject_del(&s->kobj); |
| 7546 | } |
| 7547 | |
| 7548 | void sysfs_slab_release(struct kmem_cache *s) |
| 7549 | { |
| 7550 | kobject_put(&s->kobj); |
| 7551 | } |
| 7552 | |
| 7553 | /* |
 * Aliases created during bootup need to be buffered until sysfs becomes
 * available, lest we lose that information.
| 7556 | */ |
| 7557 | struct saved_alias { |
| 7558 | struct kmem_cache *s; |
| 7559 | const char *name; |
| 7560 | struct saved_alias *next; |
| 7561 | }; |
| 7562 | |
| 7563 | static struct saved_alias *alias_list; |
| 7564 | |
| 7565 | static int sysfs_slab_alias(struct kmem_cache *s, const char *name) |
| 7566 | { |
| 7567 | struct saved_alias *al; |
| 7568 | |
| 7569 | if (slab_state == FULL) { |
| 7570 | /* |
| 7571 | * If we have a leftover link then remove it. |
| 7572 | */ |
| 7573 | sysfs_remove_link(&slab_kset->kobj, name); |
| 7574 | /* |
		 * The original cache may have failed to create its sysfs file.
| 7576 | * In that case, sysfs_create_link() returns -ENOENT and |
| 7577 | * symbolic link creation is skipped. |
| 7578 | */ |
| 7579 | return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); |
| 7580 | } |
| 7581 | |
| 7582 | al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); |
| 7583 | if (!al) |
| 7584 | return -ENOMEM; |
| 7585 | |
| 7586 | al->s = s; |
| 7587 | al->name = name; |
| 7588 | al->next = alias_list; |
| 7589 | alias_list = al; |
| 7590 | kmsan_unpoison_memory(al, sizeof(*al)); |
| 7591 | return 0; |
| 7592 | } |
| 7593 | |
| 7594 | static int __init slab_sysfs_init(void) |
| 7595 | { |
| 7596 | struct kmem_cache *s; |
| 7597 | int err; |
| 7598 | |
| 7599 | mutex_lock(&slab_mutex); |
| 7600 | |
| 7601 | slab_kset = kset_create_and_add("slab", NULL, kernel_kobj); |
| 7602 | if (!slab_kset) { |
| 7603 | mutex_unlock(&slab_mutex); |
| 7604 | pr_err("Cannot register slab subsystem.\n"); |
| 7605 | return -ENOMEM; |
| 7606 | } |
| 7607 | |
| 7608 | slab_state = FULL; |
| 7609 | |
| 7610 | list_for_each_entry(s, &slab_caches, list) { |
| 7611 | err = sysfs_slab_add(s); |
| 7612 | if (err) |
| 7613 | pr_err("SLUB: Unable to add boot slab %s to sysfs\n", |
| 7614 | s->name); |
| 7615 | } |
| 7616 | |
| 7617 | while (alias_list) { |
| 7618 | struct saved_alias *al = alias_list; |
| 7619 | |
| 7620 | alias_list = alias_list->next; |
| 7621 | err = sysfs_slab_alias(al->s, al->name); |
| 7622 | if (err) |
| 7623 | pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", |
| 7624 | al->name); |
| 7625 | kfree(al); |
| 7626 | } |
| 7627 | |
| 7628 | mutex_unlock(&slab_mutex); |
| 7629 | return 0; |
| 7630 | } |
| 7631 | late_initcall(slab_sysfs_init); |
| 7632 | #endif /* SLAB_SUPPORTS_SYSFS */ |
| 7633 | |
| 7634 | #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) |
| 7635 | static int slab_debugfs_show(struct seq_file *seq, void *v) |
| 7636 | { |
| 7637 | struct loc_track *t = seq->private; |
| 7638 | struct location *l; |
| 7639 | unsigned long idx; |
| 7640 | |
| 7641 | idx = (unsigned long) t->idx; |
| 7642 | if (idx < t->count) { |
| 7643 | l = &t->loc[idx]; |
| 7644 | |
| 7645 | seq_printf(seq, "%7ld ", l->count); |
| 7646 | |
| 7647 | if (l->addr) |
| 7648 | seq_printf(seq, "%pS", (void *)l->addr); |
| 7649 | else |
| 7650 | seq_puts(seq, "<not-available>"); |
| 7651 | |
| 7652 | if (l->waste) |
| 7653 | seq_printf(seq, " waste=%lu/%lu", |
| 7654 | l->count * l->waste, l->waste); |
| 7655 | |
| 7656 | if (l->sum_time != l->min_time) { |
| 7657 | seq_printf(seq, " age=%ld/%llu/%ld", |
| 7658 | l->min_time, div_u64(l->sum_time, l->count), |
| 7659 | l->max_time); |
| 7660 | } else |
| 7661 | seq_printf(seq, " age=%ld", l->min_time); |
| 7662 | |
| 7663 | if (l->min_pid != l->max_pid) |
| 7664 | seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid); |
| 7665 | else |
| 7666 | seq_printf(seq, " pid=%ld", |
| 7667 | l->min_pid); |
| 7668 | |
| 7669 | if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus))) |
| 7670 | seq_printf(seq, " cpus=%*pbl", |
| 7671 | cpumask_pr_args(to_cpumask(l->cpus))); |
| 7672 | |
| 7673 | if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) |
| 7674 | seq_printf(seq, " nodes=%*pbl", |
| 7675 | nodemask_pr_args(&l->nodes)); |
| 7676 | |
| 7677 | #ifdef CONFIG_STACKDEPOT |
| 7678 | { |
| 7679 | depot_stack_handle_t handle; |
| 7680 | unsigned long *entries; |
| 7681 | unsigned int nr_entries, j; |
| 7682 | |
| 7683 | handle = READ_ONCE(l->handle); |
| 7684 | if (handle) { |
| 7685 | nr_entries = stack_depot_fetch(handle, &entries); |
| 7686 | seq_puts(seq, "\n"); |
| 7687 | for (j = 0; j < nr_entries; j++) |
| 7688 | seq_printf(seq, " %pS\n", (void *)entries[j]); |
| 7689 | } |
| 7690 | } |
| 7691 | #endif |
| 7692 | seq_puts(seq, "\n"); |
| 7693 | } |
| 7694 | |
| 7695 | if (!idx && !t->count) |
| 7696 | seq_puts(seq, "No data\n"); |
| 7697 | |
| 7698 | return 0; |
| 7699 | } |
| 7700 | |
| 7701 | static void slab_debugfs_stop(struct seq_file *seq, void *v) |
| 7702 | { |
| 7703 | } |
| 7704 | |
| 7705 | static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos) |
| 7706 | { |
| 7707 | struct loc_track *t = seq->private; |
| 7708 | |
| 7709 | t->idx = ++(*ppos); |
| 7710 | if (*ppos <= t->count) |
| 7711 | return ppos; |
| 7712 | |
| 7713 | return NULL; |
| 7714 | } |
| 7715 | |
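/* Sort locations in descending order of their allocation/free count. */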
| 7716 | static int cmp_loc_by_count(const void *a, const void *b, const void *data) |
| 7717 | { |
| 7718 | struct location *loc1 = (struct location *)a; |
| 7719 | struct location *loc2 = (struct location *)b; |
| 7720 | |
| 7721 | if (loc1->count > loc2->count) |
| 7722 | return -1; |
| 7723 | else |
| 7724 | return 1; |
| 7725 | } |
| 7726 | |
| 7727 | static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) |
| 7728 | { |
| 7729 | struct loc_track *t = seq->private; |
| 7730 | |
| 7731 | t->idx = *ppos; |
| 7732 | return ppos; |
| 7733 | } |
| 7734 | |
| 7735 | static const struct seq_operations slab_debugfs_sops = { |
| 7736 | .start = slab_debugfs_start, |
| 7737 | .next = slab_debugfs_next, |
| 7738 | .stop = slab_debugfs_stop, |
| 7739 | .show = slab_debugfs_show, |
| 7740 | }; |
| 7741 | |
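/*
 * Open handler for alloc_traces/free_traces: collect the recorded tracks
 * from all partial and full slabs on every node into a loc_track, sorted
 * by descending count, for the seq_file iterator to print.
 */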
| 7742 | static int slab_debug_trace_open(struct inode *inode, struct file *filep) |
| 7743 | { |
| 7744 | |
| 7745 | struct kmem_cache_node *n; |
| 7746 | enum track_item alloc; |
| 7747 | int node; |
| 7748 | struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops, |
| 7749 | sizeof(struct loc_track)); |
| 7750 | struct kmem_cache *s = file_inode(filep)->i_private; |
| 7751 | unsigned long *obj_map; |
| 7752 | |
| 7753 | if (!t) |
| 7754 | return -ENOMEM; |
| 7755 | |
| 7756 | obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); |
| 7757 | if (!obj_map) { |
| 7758 | seq_release_private(inode, filep); |
| 7759 | return -ENOMEM; |
| 7760 | } |
| 7761 | |
| 7762 | alloc = debugfs_get_aux_num(filep); |
| 7763 | |
| 7764 | if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) { |
| 7765 | bitmap_free(obj_map); |
| 7766 | seq_release_private(inode, filep); |
| 7767 | return -ENOMEM; |
| 7768 | } |
| 7769 | |
| 7770 | for_each_kmem_cache_node(s, node, n) { |
| 7771 | unsigned long flags; |
| 7772 | struct slab *slab; |
| 7773 | |
| 7774 | if (!node_nr_slabs(n)) |
| 7775 | continue; |
| 7776 | |
| 7777 | spin_lock_irqsave(&n->list_lock, flags); |
| 7778 | list_for_each_entry(slab, &n->partial, slab_list) |
| 7779 | process_slab(t, s, slab, alloc, obj_map); |
| 7780 | list_for_each_entry(slab, &n->full, slab_list) |
| 7781 | process_slab(t, s, slab, alloc, obj_map); |
| 7782 | spin_unlock_irqrestore(&n->list_lock, flags); |
| 7783 | } |
| 7784 | |
| 7785 | /* Sort locations by count */ |
| 7786 | sort_r(t->loc, t->count, sizeof(struct location), |
| 7787 | cmp_loc_by_count, NULL, NULL); |
| 7788 | |
| 7789 | bitmap_free(obj_map); |
| 7790 | return 0; |
| 7791 | } |
| 7792 | |
| 7793 | static int slab_debug_trace_release(struct inode *inode, struct file *file) |
| 7794 | { |
| 7795 | struct seq_file *seq = file->private_data; |
| 7796 | struct loc_track *t = seq->private; |
| 7797 | |
| 7798 | free_loc_track(t); |
| 7799 | return seq_release_private(inode, file); |
| 7800 | } |
| 7801 | |
| 7802 | static const struct file_operations slab_debugfs_fops = { |
| 7803 | .open = slab_debug_trace_open, |
| 7804 | .read = seq_read, |
| 7805 | .llseek = seq_lseek, |
| 7806 | .release = slab_debug_trace_release, |
| 7807 | }; |
| 7808 | |
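/*
 * Create the per-cache debugfs directory containing the alloc_traces and
 * free_traces files.
 */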
| 7809 | static void debugfs_slab_add(struct kmem_cache *s) |
| 7810 | { |
| 7811 | struct dentry *slab_cache_dir; |
| 7812 | |
| 7813 | if (unlikely(!slab_debugfs_root)) |
| 7814 | return; |
| 7815 | |
| 7816 | slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); |
| 7817 | |
| 7818 | debugfs_create_file_aux_num("alloc_traces", 0400, slab_cache_dir, s, |
| 7819 | TRACK_ALLOC, &slab_debugfs_fops); |
| 7820 | |
| 7821 | debugfs_create_file_aux_num("free_traces", 0400, slab_cache_dir, s, |
| 7822 | TRACK_FREE, &slab_debugfs_fops); |
| 7823 | } |
| 7824 | |
| 7825 | void debugfs_slab_release(struct kmem_cache *s) |
| 7826 | { |
| 7827 | debugfs_lookup_and_remove(s->name, slab_debugfs_root); |
| 7828 | } |
| 7829 | |
| 7830 | static int __init slab_debugfs_init(void) |
| 7831 | { |
| 7832 | struct kmem_cache *s; |
| 7833 | |
| 7834 | slab_debugfs_root = debugfs_create_dir("slab", NULL); |
| 7835 | |
| 7836 | list_for_each_entry(s, &slab_caches, list) |
| 7837 | if (s->flags & SLAB_STORE_USER) |
| 7838 | debugfs_slab_add(s); |
| 7839 | |
| 7840 | return 0; |
| 7841 | |
| 7842 | } |
| 7843 | __initcall(slab_debugfs_init); |
| 7844 | #endif |
| 7845 | /* |
| 7846 | * The /proc/slabinfo ABI |
| 7847 | */ |
| 7848 | #ifdef CONFIG_SLUB_DEBUG |
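/*
 * Fill in the /proc/slabinfo counters. The number of free objects is
 * approximated from the partial lists, so active_objs is an estimate.
 */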
| 7849 | void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) |
| 7850 | { |
| 7851 | unsigned long nr_slabs = 0; |
| 7852 | unsigned long nr_objs = 0; |
| 7853 | unsigned long nr_free = 0; |
| 7854 | int node; |
| 7855 | struct kmem_cache_node *n; |
| 7856 | |
| 7857 | for_each_kmem_cache_node(s, node, n) { |
| 7858 | nr_slabs += node_nr_slabs(n); |
| 7859 | nr_objs += node_nr_objs(n); |
| 7860 | nr_free += count_partial_free_approx(n); |
| 7861 | } |
| 7862 | |
| 7863 | sinfo->active_objs = nr_objs - nr_free; |
| 7864 | sinfo->num_objs = nr_objs; |
| 7865 | sinfo->active_slabs = nr_slabs; |
| 7866 | sinfo->num_slabs = nr_slabs; |
| 7867 | sinfo->objects_per_slab = oo_objects(s->oo); |
| 7868 | sinfo->cache_order = oo_order(s->oo); |
| 7869 | } |
| 7870 | #endif /* CONFIG_SLUB_DEBUG */ |