mm/slub.c
// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>

#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects:
 *	A. page->freelist	-> List of objects free in a page
 *	B. page->inuse		-> Number of objects in use
 *	C. page->objects	-> Number of objects in page
 *	D. page->frozen		-> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list. The processor that froze the slab is the one who can
 *   perform list operations on the page. Other processors may put objects
 *   onto the freelist but the processor that froze the slab is the only
 *   one that can retrieve the objects from the page's freelist.
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. While it is held, no new slabs may be added
 *   to or removed from the lists, nor may the number of partial slabs be
 *   modified. (Note that the total number of slabs is an atomic value that
 *   may be modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *   Interrupts are disabled during allocation and deallocation in order to
 *   make the slab allocator safe to use in the context of an irq. In addition
 *   interrupts are disabled to ensure that the processor does not change
 *   while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocators per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is frozen and exempt from list processing.
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
 * 			freelist that allows lockless access to
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 * 			options set. This moves slab handling out of
 * 			the fast path and disables lockless freelists.
 */

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
	return 0;
#endif
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
/* Use cmpxchg_double */
#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void memcg_propagate_slab_attrs(struct kmem_cache *s);
static void sysfs_slab_remove(struct kmem_cache *s);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
static inline void sysfs_slab_remove(struct kmem_cache *s) { }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/********************************************************************
 * 			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	/*
	 * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
	 * Normally, this doesn't cause any issues, as both set_freepointer()
	 * and get_freepointer() are called with a pointer with the same tag.
	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
	 * example, when __free_slab() iterates over objects in a cache, it
	 * passes untagged pointers to check_object(). check_object() in turn
	 * calls get_freepointer() with an untagged pointer, which causes the
	 * freepointer to be restored incorrectly.
	 */
	return (void *)((unsigned long)ptr ^ s->random ^
			(unsigned long)kasan_reset_tag((void *)ptr_addr));
#else
	return ptr;
#endif
}

/* Returns the freelist pointer recorded at location ptr_addr. */
static inline void *freelist_dereference(const struct kmem_cache *s,
					 void *ptr_addr)
{
	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
			    (unsigned long)ptr_addr);
}

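/*
 * Illustrative sketch (not part of SLUB): how the freelist pointer
 * obfuscation above round-trips. The helper and the values below are made
 * up; they only model "ptr ^ per-cache secret ^ storage address" applied
 * twice to the same pointer.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void *obfuscate(void *ptr, uintptr_t secret, uintptr_t where)
{
	return (void *)((uintptr_t)ptr ^ secret ^ where);
}

int main(void)
{
	uintptr_t secret = 0x5a5a5a5aUL;	/* models s->random */
	void *object = (void *)0x100000;	/* models a free object */
	void *slot = (void *)0x100040;		/* models object + s->offset */

	void *stored = obfuscate(object, secret, (uintptr_t)slot);
	void *loaded = obfuscate(stored, secret, (uintptr_t)slot);

	assert(loaded == object);	/* XOR with the same keys twice is a no-op */
	return 0;
}
#endif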
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return freelist_dereference(s, object + s->offset);
}

static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetch(object + s->offset);
}

static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	void *p;

	if (!debug_pagealloc_enabled())
		return get_freepointer(s, object);

	freepointer_addr = (unsigned long)object + s->offset;
	probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
	return freelist_ptr(s, p, freepointer_addr);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

/* Determine object index from a given position */
static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
{
	return (kasan_reset_tag(p) - addr) / s->size;
}

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
	return ((unsigned int)PAGE_SIZE << order) / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
		unsigned int size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size)
	};

	return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}

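/*
 * Illustrative sketch (not part of SLUB): the packing scheme used by
 * oo_make(), oo_order() and oo_objects() above. The page order lives above
 * OO_SHIFT and the object count in the low OO_MASK bits; the example
 * numbers are arbitrary.
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned int shift = 16;			/* OO_SHIFT */
	unsigned int mask = (1u << shift) - 1;		/* OO_MASK */
	unsigned int order = 3, objects = 170;		/* e.g. 32K slab, 192-byte objects */

	unsigned int packed = (order << shift) + objects;	/* oo_make() */

	assert((packed >> shift) == order);		/* oo_order() */
	assert((packed & mask) == objects);		/* oo_objects() */
	return 0;
}
#endif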
/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	__bit_spin_unlock(PG_locked, &page->flags);
}

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			return true;
		}
		slab_unlock(page);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

b789ef51
CL
401static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
402 void *freelist_old, unsigned long counters_old,
403 void *freelist_new, unsigned long counters_new,
404 const char *n)
405{
2565409f
HC
406#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
407 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
b789ef51 408 if (s->flags & __CMPXCHG_DOUBLE) {
cdcd6298 409 if (cmpxchg_double(&page->freelist, &page->counters,
0aa9a13d
DC
410 freelist_old, counters_old,
411 freelist_new, counters_new))
6f6528a1 412 return true;
b789ef51
CL
413 } else
414#endif
415 {
1d07171c
CL
416 unsigned long flags;
417
418 local_irq_save(flags);
881db7fb 419 slab_lock(page);
d0e0ac97
CG
420 if (page->freelist == freelist_old &&
421 page->counters == counters_old) {
b789ef51 422 page->freelist = freelist_new;
7d27a04b 423 page->counters = counters_new;
881db7fb 424 slab_unlock(page);
1d07171c 425 local_irq_restore(flags);
6f6528a1 426 return true;
b789ef51 427 }
881db7fb 428 slab_unlock(page);
1d07171c 429 local_irq_restore(flags);
b789ef51
CL
430 }
431
432 cpu_relax();
433 stat(s, CMPXCHG_DOUBLE_FAIL);
434
435#ifdef SLUB_DEBUG_CMPXCHG
f9f58285 436 pr_info("%s %s: cmpxchg double redo ", n, s->name);
b789ef51
CL
437#endif
438
6f6528a1 439 return false;
b789ef51
CL
440}
441
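/*
 * Illustrative sketch (not part of SLUB): the retry pattern that callers of
 * cmpxchg_double_slab() follow, reduced to a single word and standard C11
 * atomics. push_free() is a hypothetical helper: snapshot the current value,
 * compute the new one, and redo everything if another CPU won the race.
 */
#if 0
#include <stdatomic.h>

struct fake_page {
	_Atomic(void *) freelist;
};

static void push_free(struct fake_page *page, void **object)
{
	void *old;

	do {
		old = atomic_load(&page->freelist);
		*object = old;	/* link the object in front of the old list */
	} while (!atomic_compare_exchange_weak(&page->freelist, &old, object));
}
#endif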
41ecc55b 442#ifdef CONFIG_SLUB_DEBUG
/*
 * Determine a map of objects in use on a page.
 *
 * Node list_lock must be held to guarantee that the page does
 * not vanish from under us.
 */
449static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
450{
451 void *p;
452 void *addr = page_address(page);
453
454 for (p = page->freelist; p; p = get_freepointer(s, p))
455 set_bit(slab_index(p, s, addr), map);
456}
457
870b1fbb 458static inline unsigned int size_from_object(struct kmem_cache *s)
d86bd1be
JK
459{
460 if (s->flags & SLAB_RED_ZONE)
461 return s->size - s->red_left_pad;
462
463 return s->size;
464}
465
466static inline void *restore_red_left(struct kmem_cache *s, void *p)
467{
468 if (s->flags & SLAB_RED_ZONE)
469 p -= s->red_left_pad;
470
471 return p;
472}
473
41ecc55b
CL
474/*
475 * Debug settings:
476 */
89d3c87e 477#if defined(CONFIG_SLUB_DEBUG_ON)
d50112ed 478static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
f0630fff 479#else
d50112ed 480static slab_flags_t slub_debug;
f0630fff 481#endif
41ecc55b
CL
482
483static char *slub_debug_slabs;
fa5ec8a1 484static int disable_higher_order_debug;
41ecc55b 485
a79316c6
AR
486/*
487 * slub is about to manipulate internal object metadata. This memory lies
488 * outside the range of the allocated object, so accessing it would normally
489 * be reported by kasan as a bounds error. metadata_access_enable() is used
490 * to tell kasan that these accesses are OK.
491 */
492static inline void metadata_access_enable(void)
493{
494 kasan_disable_current();
495}
496
497static inline void metadata_access_disable(void)
498{
499 kasan_enable_current();
500}
501
81819f0f
CL
502/*
503 * Object debugging
504 */
d86bd1be
JK
505
506/* Verify that a pointer has an address that is valid within a slab page */
507static inline int check_valid_pointer(struct kmem_cache *s,
508 struct page *page, void *object)
509{
510 void *base;
511
512 if (!object)
513 return 1;
514
515 base = page_address(page);
338cfaad 516 object = kasan_reset_tag(object);
d86bd1be
JK
517 object = restore_red_left(s, object);
518 if (object < base || object >= base + page->objects * s->size ||
519 (object - base) % s->size) {
520 return 0;
521 }
522
523 return 1;
524}
525
aa2efd5e
DT
526static void print_section(char *level, char *text, u8 *addr,
527 unsigned int length)
81819f0f 528{
a79316c6 529 metadata_access_enable();
aa2efd5e 530 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
ffc79d28 531 length, 1);
a79316c6 532 metadata_access_disable();
81819f0f
CL
533}
534
81819f0f
CL
535static struct track *get_track(struct kmem_cache *s, void *object,
536 enum track_item alloc)
537{
538 struct track *p;
539
540 if (s->offset)
541 p = object + s->offset + sizeof(void *);
542 else
543 p = object + s->inuse;
544
545 return p + alloc;
546}
547
548static void set_track(struct kmem_cache *s, void *object,
ce71e27c 549 enum track_item alloc, unsigned long addr)
81819f0f 550{
1a00df4a 551 struct track *p = get_track(s, object, alloc);
81819f0f 552
81819f0f 553 if (addr) {
d6543e39
BG
554#ifdef CONFIG_STACKTRACE
555 struct stack_trace trace;
556 int i;
557
558 trace.nr_entries = 0;
559 trace.max_entries = TRACK_ADDRS_COUNT;
560 trace.entries = p->addrs;
561 trace.skip = 3;
a79316c6 562 metadata_access_enable();
d6543e39 563 save_stack_trace(&trace);
a79316c6 564 metadata_access_disable();
d6543e39
BG
565
566 /* See rant in lockdep.c */
567 if (trace.nr_entries != 0 &&
568 trace.entries[trace.nr_entries - 1] == ULONG_MAX)
569 trace.nr_entries--;
570
571 for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
572 p->addrs[i] = 0;
573#endif
81819f0f
CL
574 p->addr = addr;
575 p->cpu = smp_processor_id();
88e4ccf2 576 p->pid = current->pid;
81819f0f
CL
577 p->when = jiffies;
578 } else
579 memset(p, 0, sizeof(struct track));
580}
581
81819f0f
CL
582static void init_tracking(struct kmem_cache *s, void *object)
583{
24922684
CL
584 if (!(s->flags & SLAB_STORE_USER))
585 return;
586
ce71e27c
EGM
587 set_track(s, object, TRACK_FREE, 0UL);
588 set_track(s, object, TRACK_ALLOC, 0UL);
81819f0f
CL
589}
590
86609d33 591static void print_track(const char *s, struct track *t, unsigned long pr_time)
81819f0f
CL
592{
593 if (!t->addr)
594 return;
595
f9f58285 596 pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
86609d33 597 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
d6543e39
BG
598#ifdef CONFIG_STACKTRACE
599 {
600 int i;
601 for (i = 0; i < TRACK_ADDRS_COUNT; i++)
602 if (t->addrs[i])
f9f58285 603 pr_err("\t%pS\n", (void *)t->addrs[i]);
d6543e39
BG
604 else
605 break;
606 }
607#endif
24922684
CL
608}
609
610static void print_tracking(struct kmem_cache *s, void *object)
611{
86609d33 612 unsigned long pr_time = jiffies;
24922684
CL
613 if (!(s->flags & SLAB_STORE_USER))
614 return;
615
86609d33
CP
616 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
617 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
24922684
CL
618}
619
620static void print_page_info(struct page *page)
621{
f9f58285 622 pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
d0e0ac97 623 page, page->objects, page->inuse, page->freelist, page->flags);
24922684
CL
624
625}
626
627static void slab_bug(struct kmem_cache *s, char *fmt, ...)
628{
ecc42fbe 629 struct va_format vaf;
24922684 630 va_list args;
24922684
CL
631
632 va_start(args, fmt);
ecc42fbe
FF
633 vaf.fmt = fmt;
634 vaf.va = &args;
f9f58285 635 pr_err("=============================================================================\n");
ecc42fbe 636 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
f9f58285 637 pr_err("-----------------------------------------------------------------------------\n\n");
645df230 638
373d4d09 639 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
ecc42fbe 640 va_end(args);
81819f0f
CL
641}
642
24922684
CL
643static void slab_fix(struct kmem_cache *s, char *fmt, ...)
644{
ecc42fbe 645 struct va_format vaf;
24922684 646 va_list args;
24922684
CL
647
648 va_start(args, fmt);
ecc42fbe
FF
649 vaf.fmt = fmt;
650 vaf.va = &args;
651 pr_err("FIX %s: %pV\n", s->name, &vaf);
24922684 652 va_end(args);
24922684
CL
653}
654
655static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
81819f0f
CL
656{
657 unsigned int off; /* Offset of last byte */
a973e9dd 658 u8 *addr = page_address(page);
24922684
CL
659
660 print_tracking(s, p);
661
662 print_page_info(page);
663
f9f58285
FF
664 pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
665 p, p - addr, get_freepointer(s, p));
24922684 666
d86bd1be 667 if (s->flags & SLAB_RED_ZONE)
aa2efd5e
DT
668 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
669 s->red_left_pad);
d86bd1be 670 else if (p > addr + 16)
aa2efd5e 671 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
81819f0f 672
aa2efd5e 673 print_section(KERN_ERR, "Object ", p,
1b473f29 674 min_t(unsigned int, s->object_size, PAGE_SIZE));
81819f0f 675 if (s->flags & SLAB_RED_ZONE)
aa2efd5e 676 print_section(KERN_ERR, "Redzone ", p + s->object_size,
3b0efdfa 677 s->inuse - s->object_size);
81819f0f 678
81819f0f
CL
679 if (s->offset)
680 off = s->offset + sizeof(void *);
681 else
682 off = s->inuse;
683
24922684 684 if (s->flags & SLAB_STORE_USER)
81819f0f 685 off += 2 * sizeof(struct track);
81819f0f 686
80a9201a
AP
687 off += kasan_metadata_size(s);
688
d86bd1be 689 if (off != size_from_object(s))
81819f0f 690 /* Beginning of the filler is the free pointer */
aa2efd5e
DT
691 print_section(KERN_ERR, "Padding ", p + off,
692 size_from_object(s) - off);
24922684
CL
693
694 dump_stack();
81819f0f
CL
695}
696
75c66def 697void object_err(struct kmem_cache *s, struct page *page,
81819f0f
CL
698 u8 *object, char *reason)
699{
3dc50637 700 slab_bug(s, "%s", reason);
24922684 701 print_trailer(s, page, object);
81819f0f
CL
702}
703
a38965bf 704static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
d0e0ac97 705 const char *fmt, ...)
81819f0f
CL
706{
707 va_list args;
708 char buf[100];
709
24922684
CL
710 va_start(args, fmt);
711 vsnprintf(buf, sizeof(buf), fmt, args);
81819f0f 712 va_end(args);
3dc50637 713 slab_bug(s, "%s", buf);
24922684 714 print_page_info(page);
81819f0f
CL
715 dump_stack();
716}
717
f7cb1933 718static void init_object(struct kmem_cache *s, void *object, u8 val)
81819f0f
CL
719{
720 u8 *p = object;
721
d86bd1be
JK
722 if (s->flags & SLAB_RED_ZONE)
723 memset(p - s->red_left_pad, val, s->red_left_pad);
724
81819f0f 725 if (s->flags & __OBJECT_POISON) {
3b0efdfa
CL
726 memset(p, POISON_FREE, s->object_size - 1);
727 p[s->object_size - 1] = POISON_END;
81819f0f
CL
728 }
729
730 if (s->flags & SLAB_RED_ZONE)
3b0efdfa 731 memset(p + s->object_size, val, s->inuse - s->object_size);
81819f0f
CL
732}
733
24922684
CL
734static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
735 void *from, void *to)
736{
737 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
738 memset(from, data, to - from);
739}
740
741static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
742 u8 *object, char *what,
06428780 743 u8 *start, unsigned int value, unsigned int bytes)
24922684
CL
744{
745 u8 *fault;
746 u8 *end;
747
a79316c6 748 metadata_access_enable();
79824820 749 fault = memchr_inv(start, value, bytes);
a79316c6 750 metadata_access_disable();
24922684
CL
751 if (!fault)
752 return 1;
753
754 end = start + bytes;
755 while (end > fault && end[-1] == value)
756 end--;
757
758 slab_bug(s, "%s overwritten", what);
f9f58285 759 pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
24922684
CL
760 fault, end - 1, fault[0], value);
761 print_trailer(s, page, object);
762
763 restore_bytes(s, what, value, fault, end);
764 return 0;
81819f0f
CL
765}
766
/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 *	pointer is the first word of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */

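/*
 * Illustrative sketch (not part of SLUB): walking the layout described above
 * for a hypothetical cache with red zoning, poisoning and SLAB_STORE_USER.
 * All numbers are invented; they only show how object_size, inuse, the free
 * pointer, the tracking data and s->size relate to each other.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int object_size = 40;	/* payload handed out to users */
	unsigned int inuse = 48;	/* object_size rounded up to a word boundary */
	unsigned int offset = 48;	/* free pointer placed right after the object */
	unsigned int ptr_size = 8;	/* sizeof(void *) on a 64-bit build */
	unsigned int track = offset + ptr_size;	/* two struct track entries follow */

	printf("payload  : bytes [0, %u)\n", object_size);
	printf("red zone : bytes [%u, %u)\n", object_size, inuse);
	printf("free ptr : bytes [%u, %u)\n", offset, offset + ptr_size);
	printf("tracking : starts at byte %u\n", track);
	return 0;
}
#endif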
81819f0f
CL
805static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
806{
807 unsigned long off = s->inuse; /* The end of info */
808
809 if (s->offset)
810 /* Freepointer is placed after the object. */
811 off += sizeof(void *);
812
813 if (s->flags & SLAB_STORE_USER)
814 /* We also have user information there */
815 off += 2 * sizeof(struct track);
816
80a9201a
AP
817 off += kasan_metadata_size(s);
818
d86bd1be 819 if (size_from_object(s) == off)
81819f0f
CL
820 return 1;
821
24922684 822 return check_bytes_and_report(s, page, p, "Object padding",
d86bd1be 823 p + off, POISON_INUSE, size_from_object(s) - off);
81819f0f
CL
824}
825
39b26464 826/* Check the pad bytes at the end of a slab page */
81819f0f
CL
827static int slab_pad_check(struct kmem_cache *s, struct page *page)
828{
24922684
CL
829 u8 *start;
830 u8 *fault;
831 u8 *end;
5d682681 832 u8 *pad;
24922684
CL
833 int length;
834 int remainder;
81819f0f
CL
835
836 if (!(s->flags & SLAB_POISON))
837 return 1;
838
a973e9dd 839 start = page_address(page);
9736d2a9 840 length = PAGE_SIZE << compound_order(page);
39b26464
CL
841 end = start + length;
842 remainder = length % s->size;
81819f0f
CL
843 if (!remainder)
844 return 1;
845
5d682681 846 pad = end - remainder;
a79316c6 847 metadata_access_enable();
5d682681 848 fault = memchr_inv(pad, POISON_INUSE, remainder);
a79316c6 849 metadata_access_disable();
24922684
CL
850 if (!fault)
851 return 1;
852 while (end > fault && end[-1] == POISON_INUSE)
853 end--;
854
855 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
5d682681 856 print_section(KERN_ERR, "Padding ", pad, remainder);
24922684 857
5d682681 858 restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
24922684 859 return 0;
81819f0f
CL
860}
861
862static int check_object(struct kmem_cache *s, struct page *page,
f7cb1933 863 void *object, u8 val)
81819f0f
CL
864{
865 u8 *p = object;
3b0efdfa 866 u8 *endobject = object + s->object_size;
81819f0f
CL
867
868 if (s->flags & SLAB_RED_ZONE) {
d86bd1be
JK
869 if (!check_bytes_and_report(s, page, object, "Redzone",
870 object - s->red_left_pad, val, s->red_left_pad))
871 return 0;
872
24922684 873 if (!check_bytes_and_report(s, page, object, "Redzone",
3b0efdfa 874 endobject, val, s->inuse - s->object_size))
81819f0f 875 return 0;
81819f0f 876 } else {
3b0efdfa 877 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
3adbefee 878 check_bytes_and_report(s, page, p, "Alignment padding",
d0e0ac97
CG
879 endobject, POISON_INUSE,
880 s->inuse - s->object_size);
3adbefee 881 }
81819f0f
CL
882 }
883
884 if (s->flags & SLAB_POISON) {
f7cb1933 885 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
24922684 886 (!check_bytes_and_report(s, page, p, "Poison", p,
3b0efdfa 887 POISON_FREE, s->object_size - 1) ||
24922684 888 !check_bytes_and_report(s, page, p, "Poison",
3b0efdfa 889 p + s->object_size - 1, POISON_END, 1)))
81819f0f 890 return 0;
81819f0f
CL
891 /*
892 * check_pad_bytes cleans up on its own.
893 */
894 check_pad_bytes(s, page, p);
895 }
896
f7cb1933 897 if (!s->offset && val == SLUB_RED_ACTIVE)
81819f0f
CL
898 /*
899 * Object and freepointer overlap. Cannot check
900 * freepointer while object is allocated.
901 */
902 return 1;
903
904 /* Check free pointer validity */
905 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
906 object_err(s, page, p, "Freepointer corrupt");
907 /*
9f6c708e 908 * No choice but to zap it and thus lose the remainder
81819f0f 909 * of the free objects in this slab. May cause
672bba3a 910 * another error because the object count is now wrong.
81819f0f 911 */
a973e9dd 912 set_freepointer(s, p, NULL);
81819f0f
CL
913 return 0;
914 }
915 return 1;
916}
917
918static int check_slab(struct kmem_cache *s, struct page *page)
919{
39b26464
CL
920 int maxobj;
921
81819f0f
CL
922 VM_BUG_ON(!irqs_disabled());
923
924 if (!PageSlab(page)) {
24922684 925 slab_err(s, page, "Not a valid slab page");
81819f0f
CL
926 return 0;
927 }
39b26464 928
9736d2a9 929 maxobj = order_objects(compound_order(page), s->size);
39b26464
CL
930 if (page->objects > maxobj) {
931 slab_err(s, page, "objects %u > max %u",
f6edde9c 932 page->objects, maxobj);
39b26464
CL
933 return 0;
934 }
935 if (page->inuse > page->objects) {
24922684 936 slab_err(s, page, "inuse %u > max %u",
f6edde9c 937 page->inuse, page->objects);
81819f0f
CL
938 return 0;
939 }
940 /* Slab_pad_check fixes things up after itself */
941 slab_pad_check(s, page);
942 return 1;
943}
944
945/*
672bba3a
CL
946 * Determine if a certain object on a page is on the freelist. Must hold the
947 * slab lock to guarantee that the chains are in a consistent state.
81819f0f
CL
948 */
949static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
950{
951 int nr = 0;
881db7fb 952 void *fp;
81819f0f 953 void *object = NULL;
f6edde9c 954 int max_objects;
81819f0f 955
881db7fb 956 fp = page->freelist;
39b26464 957 while (fp && nr <= page->objects) {
81819f0f
CL
958 if (fp == search)
959 return 1;
960 if (!check_valid_pointer(s, page, fp)) {
961 if (object) {
962 object_err(s, page, object,
963 "Freechain corrupt");
a973e9dd 964 set_freepointer(s, object, NULL);
81819f0f 965 } else {
24922684 966 slab_err(s, page, "Freepointer corrupt");
a973e9dd 967 page->freelist = NULL;
39b26464 968 page->inuse = page->objects;
24922684 969 slab_fix(s, "Freelist cleared");
81819f0f
CL
970 return 0;
971 }
972 break;
973 }
974 object = fp;
975 fp = get_freepointer(s, object);
976 nr++;
977 }
978
9736d2a9 979 max_objects = order_objects(compound_order(page), s->size);
210b5c06
CG
980 if (max_objects > MAX_OBJS_PER_PAGE)
981 max_objects = MAX_OBJS_PER_PAGE;
224a88be
CL
982
983 if (page->objects != max_objects) {
756a025f
JP
984 slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
985 page->objects, max_objects);
224a88be
CL
986 page->objects = max_objects;
987 slab_fix(s, "Number of objects adjusted.");
988 }
39b26464 989 if (page->inuse != page->objects - nr) {
756a025f
JP
990 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
991 page->inuse, page->objects - nr);
39b26464 992 page->inuse = page->objects - nr;
24922684 993 slab_fix(s, "Object count adjusted.");
81819f0f
CL
994 }
995 return search == NULL;
996}
997
0121c619
CL
998static void trace(struct kmem_cache *s, struct page *page, void *object,
999 int alloc)
3ec09742
CL
1000{
1001 if (s->flags & SLAB_TRACE) {
f9f58285 1002 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
3ec09742
CL
1003 s->name,
1004 alloc ? "alloc" : "free",
1005 object, page->inuse,
1006 page->freelist);
1007
1008 if (!alloc)
aa2efd5e 1009 print_section(KERN_INFO, "Object ", (void *)object,
d0e0ac97 1010 s->object_size);
3ec09742
CL
1011
1012 dump_stack();
1013 }
1014}
1015
643b1138 1016/*
672bba3a 1017 * Tracking of fully allocated slabs for debugging purposes.
643b1138 1018 */
5cc6eee8
CL
1019static void add_full(struct kmem_cache *s,
1020 struct kmem_cache_node *n, struct page *page)
643b1138 1021{
5cc6eee8
CL
1022 if (!(s->flags & SLAB_STORE_USER))
1023 return;
1024
255d0884 1025 lockdep_assert_held(&n->list_lock);
643b1138 1026 list_add(&page->lru, &n->full);
643b1138
CL
1027}
1028
c65c1877 1029static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
643b1138 1030{
643b1138
CL
1031 if (!(s->flags & SLAB_STORE_USER))
1032 return;
1033
255d0884 1034 lockdep_assert_held(&n->list_lock);
643b1138 1035 list_del(&page->lru);
643b1138
CL
1036}
1037
0f389ec6
CL
1038/* Tracking of the number of slabs for debugging purposes */
1039static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1040{
1041 struct kmem_cache_node *n = get_node(s, node);
1042
1043 return atomic_long_read(&n->nr_slabs);
1044}
1045
26c02cf0
AB
1046static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1047{
1048 return atomic_long_read(&n->nr_slabs);
1049}
1050
205ab99d 1051static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
0f389ec6
CL
1052{
1053 struct kmem_cache_node *n = get_node(s, node);
1054
1055 /*
1056 * May be called early in order to allocate a slab for the
1057 * kmem_cache_node structure. Solve the chicken-egg
1058 * dilemma by deferring the increment of the count during
1059 * bootstrap (see early_kmem_cache_node_alloc).
1060 */
338b2642 1061 if (likely(n)) {
0f389ec6 1062 atomic_long_inc(&n->nr_slabs);
205ab99d
CL
1063 atomic_long_add(objects, &n->total_objects);
1064 }
0f389ec6 1065}
205ab99d 1066static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
0f389ec6
CL
1067{
1068 struct kmem_cache_node *n = get_node(s, node);
1069
1070 atomic_long_dec(&n->nr_slabs);
205ab99d 1071 atomic_long_sub(objects, &n->total_objects);
0f389ec6
CL
1072}
1073
1074/* Object debug checks for alloc/free paths */
3ec09742
CL
1075static void setup_object_debug(struct kmem_cache *s, struct page *page,
1076 void *object)
1077{
1078 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1079 return;
1080
f7cb1933 1081 init_object(s, object, SLUB_RED_INACTIVE);
3ec09742
CL
1082 init_tracking(s, object);
1083}
1084
a7101224
AK
1085static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
1086{
1087 if (!(s->flags & SLAB_POISON))
1088 return;
1089
1090 metadata_access_enable();
1091 memset(addr, POISON_INUSE, PAGE_SIZE << order);
1092 metadata_access_disable();
1093}
1094
becfda68 1095static inline int alloc_consistency_checks(struct kmem_cache *s,
d0e0ac97 1096 struct page *page,
ce71e27c 1097 void *object, unsigned long addr)
81819f0f
CL
1098{
1099 if (!check_slab(s, page))
becfda68 1100 return 0;
81819f0f 1101
81819f0f
CL
1102 if (!check_valid_pointer(s, page, object)) {
1103 object_err(s, page, object, "Freelist Pointer check fails");
becfda68 1104 return 0;
81819f0f
CL
1105 }
1106
f7cb1933 1107 if (!check_object(s, page, object, SLUB_RED_INACTIVE))
becfda68
LA
1108 return 0;
1109
1110 return 1;
1111}
1112
1113static noinline int alloc_debug_processing(struct kmem_cache *s,
1114 struct page *page,
1115 void *object, unsigned long addr)
1116{
1117 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1118 if (!alloc_consistency_checks(s, page, object, addr))
1119 goto bad;
1120 }
81819f0f 1121
3ec09742
CL
1122 /* Success perform special debug activities for allocs */
1123 if (s->flags & SLAB_STORE_USER)
1124 set_track(s, object, TRACK_ALLOC, addr);
1125 trace(s, page, object, 1);
f7cb1933 1126 init_object(s, object, SLUB_RED_ACTIVE);
81819f0f 1127 return 1;
3ec09742 1128
81819f0f
CL
1129bad:
1130 if (PageSlab(page)) {
1131 /*
1132 * If this is a slab page then lets do the best we can
1133 * to avoid issues in the future. Marking all objects
672bba3a 1134 * as used avoids touching the remaining objects.
81819f0f 1135 */
24922684 1136 slab_fix(s, "Marking all objects used");
39b26464 1137 page->inuse = page->objects;
a973e9dd 1138 page->freelist = NULL;
81819f0f
CL
1139 }
1140 return 0;
1141}
1142
becfda68
LA
1143static inline int free_consistency_checks(struct kmem_cache *s,
1144 struct page *page, void *object, unsigned long addr)
81819f0f 1145{
81819f0f 1146 if (!check_valid_pointer(s, page, object)) {
70d71228 1147 slab_err(s, page, "Invalid object pointer 0x%p", object);
becfda68 1148 return 0;
81819f0f
CL
1149 }
1150
1151 if (on_freelist(s, page, object)) {
24922684 1152 object_err(s, page, object, "Object already free");
becfda68 1153 return 0;
81819f0f
CL
1154 }
1155
f7cb1933 1156 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
becfda68 1157 return 0;
81819f0f 1158
1b4f59e3 1159 if (unlikely(s != page->slab_cache)) {
3adbefee 1160 if (!PageSlab(page)) {
756a025f
JP
1161 slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1162 object);
1b4f59e3 1163 } else if (!page->slab_cache) {
f9f58285
FF
1164 pr_err("SLUB <none>: no slab for object 0x%p.\n",
1165 object);
70d71228 1166 dump_stack();
06428780 1167 } else
24922684
CL
1168 object_err(s, page, object,
1169 "page slab pointer corrupt.");
becfda68
LA
1170 return 0;
1171 }
1172 return 1;
1173}
1174
1175/* Supports checking bulk free of a constructed freelist */
1176static noinline int free_debug_processing(
1177 struct kmem_cache *s, struct page *page,
1178 void *head, void *tail, int bulk_cnt,
1179 unsigned long addr)
1180{
1181 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1182 void *object = head;
1183 int cnt = 0;
1184 unsigned long uninitialized_var(flags);
1185 int ret = 0;
1186
1187 spin_lock_irqsave(&n->list_lock, flags);
1188 slab_lock(page);
1189
1190 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1191 if (!check_slab(s, page))
1192 goto out;
1193 }
1194
1195next_object:
1196 cnt++;
1197
1198 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1199 if (!free_consistency_checks(s, page, object, addr))
1200 goto out;
81819f0f 1201 }
3ec09742 1202
3ec09742
CL
1203 if (s->flags & SLAB_STORE_USER)
1204 set_track(s, object, TRACK_FREE, addr);
1205 trace(s, page, object, 0);
81084651 1206 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
f7cb1933 1207 init_object(s, object, SLUB_RED_INACTIVE);
81084651
JDB
1208
1209 /* Reached end of constructed freelist yet? */
1210 if (object != tail) {
1211 object = get_freepointer(s, object);
1212 goto next_object;
1213 }
804aa132
LA
1214 ret = 1;
1215
5c2e4bbb 1216out:
81084651
JDB
1217 if (cnt != bulk_cnt)
1218 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1219 bulk_cnt, cnt);
1220
881db7fb 1221 slab_unlock(page);
282acb43 1222 spin_unlock_irqrestore(&n->list_lock, flags);
804aa132
LA
1223 if (!ret)
1224 slab_fix(s, "Object at 0x%p not freed", object);
1225 return ret;
81819f0f
CL
1226}
1227
41ecc55b
CL
1228static int __init setup_slub_debug(char *str)
1229{
f0630fff
CL
1230 slub_debug = DEBUG_DEFAULT_FLAGS;
1231 if (*str++ != '=' || !*str)
1232 /*
1233 * No options specified. Switch on full debugging.
1234 */
1235 goto out;
1236
1237 if (*str == ',')
1238 /*
1239 * No options but restriction on slabs. This means full
1240 * debugging for slabs matching a pattern.
1241 */
1242 goto check_slabs;
1243
1244 slub_debug = 0;
1245 if (*str == '-')
1246 /*
1247 * Switch off all debugging measures.
1248 */
1249 goto out;
1250
1251 /*
1252 * Determine which debug features should be switched on
1253 */
06428780 1254 for (; *str && *str != ','; str++) {
f0630fff
CL
1255 switch (tolower(*str)) {
1256 case 'f':
becfda68 1257 slub_debug |= SLAB_CONSISTENCY_CHECKS;
f0630fff
CL
1258 break;
1259 case 'z':
1260 slub_debug |= SLAB_RED_ZONE;
1261 break;
1262 case 'p':
1263 slub_debug |= SLAB_POISON;
1264 break;
1265 case 'u':
1266 slub_debug |= SLAB_STORE_USER;
1267 break;
1268 case 't':
1269 slub_debug |= SLAB_TRACE;
1270 break;
4c13dd3b
DM
1271 case 'a':
1272 slub_debug |= SLAB_FAILSLAB;
1273 break;
08303a73
CA
1274 case 'o':
1275 /*
1276 * Avoid enabling debugging on caches if its minimum
1277 * order would increase as a result.
1278 */
1279 disable_higher_order_debug = 1;
1280 break;
f0630fff 1281 default:
f9f58285
FF
1282 pr_err("slub_debug option '%c' unknown. skipped\n",
1283 *str);
f0630fff 1284 }
41ecc55b
CL
1285 }
1286
f0630fff 1287check_slabs:
41ecc55b
CL
1288 if (*str == ',')
1289 slub_debug_slabs = str + 1;
f0630fff 1290out:
41ecc55b
CL
1291 return 1;
1292}
1293
1294__setup("slub_debug", setup_slub_debug);
1295
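/*
 * Examples of boot parameters accepted by setup_slub_debug() above; the
 * cache names are arbitrary and only serve as illustration:
 *
 *	slub_debug		full debugging for every cache
 *	slub_debug=zp		red zoning and poisoning for every cache
 *	slub_debug=,dentry	default debug options, but only for "dentry"
 *	slub_debug=fz,kmalloc-*	consistency checks and red zoning for all
 *				kmalloc caches (the '*' glob is matched by
 *				kmem_cache_flags() below)
 *	slub_debug=-		switch all debugging off
 */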
c5fd3ca0
AT
1296/*
1297 * kmem_cache_flags - apply debugging options to the cache
1298 * @object_size: the size of an object without meta data
1299 * @flags: flags to set
1300 * @name: name of the cache
1301 * @ctor: constructor function
1302 *
1303 * Debug option(s) are applied to @flags. In addition to the debug
1304 * option(s), if a slab name (or multiple) is specified i.e.
1305 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1306 * then only the select slabs will receive the debug option(s).
1307 */
0293d1fd 1308slab_flags_t kmem_cache_flags(unsigned int object_size,
d50112ed 1309 slab_flags_t flags, const char *name,
51cc5068 1310 void (*ctor)(void *))
41ecc55b 1311{
c5fd3ca0
AT
1312 char *iter;
1313 size_t len;
1314
1315 /* If slub_debug = 0, it folds into the if conditional. */
1316 if (!slub_debug_slabs)
1317 return flags | slub_debug;
1318
1319 len = strlen(name);
1320 iter = slub_debug_slabs;
1321 while (*iter) {
1322 char *end, *glob;
1323 size_t cmplen;
1324
1325 end = strchr(iter, ',');
1326 if (!end)
1327 end = iter + strlen(iter);
1328
1329 glob = strnchr(iter, end - iter, '*');
1330 if (glob)
1331 cmplen = glob - iter;
1332 else
1333 cmplen = max_t(size_t, len, (end - iter));
1334
1335 if (!strncmp(name, iter, cmplen)) {
1336 flags |= slub_debug;
1337 break;
1338 }
1339
1340 if (!*end)
1341 break;
1342 iter = end + 1;
1343 }
ba0268a8
CL
1344
1345 return flags;
41ecc55b 1346}
b4a64718 1347#else /* !CONFIG_SLUB_DEBUG */
3ec09742
CL
1348static inline void setup_object_debug(struct kmem_cache *s,
1349 struct page *page, void *object) {}
a7101224
AK
1350static inline void setup_page_debug(struct kmem_cache *s,
1351 void *addr, int order) {}
41ecc55b 1352
3ec09742 1353static inline int alloc_debug_processing(struct kmem_cache *s,
ce71e27c 1354 struct page *page, void *object, unsigned long addr) { return 0; }
41ecc55b 1355
282acb43 1356static inline int free_debug_processing(
81084651
JDB
1357 struct kmem_cache *s, struct page *page,
1358 void *head, void *tail, int bulk_cnt,
282acb43 1359 unsigned long addr) { return 0; }
41ecc55b 1360
41ecc55b
CL
1361static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1362 { return 1; }
1363static inline int check_object(struct kmem_cache *s, struct page *page,
f7cb1933 1364 void *object, u8 val) { return 1; }
5cc6eee8
CL
1365static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1366 struct page *page) {}
c65c1877
PZ
1367static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1368 struct page *page) {}
0293d1fd 1369slab_flags_t kmem_cache_flags(unsigned int object_size,
d50112ed 1370 slab_flags_t flags, const char *name,
51cc5068 1371 void (*ctor)(void *))
ba0268a8
CL
1372{
1373 return flags;
1374}
41ecc55b 1375#define slub_debug 0
0f389ec6 1376
fdaa45e9
IM
1377#define disable_higher_order_debug 0
1378
0f389ec6
CL
1379static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1380 { return 0; }
26c02cf0
AB
1381static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1382 { return 0; }
205ab99d
CL
1383static inline void inc_slabs_node(struct kmem_cache *s, int node,
1384 int objects) {}
1385static inline void dec_slabs_node(struct kmem_cache *s, int node,
1386 int objects) {}
7d550c56 1387
02e72cc6
AR
1388#endif /* CONFIG_SLUB_DEBUG */
1389
/*
 * Hooks for other subsystems that check memory allocations. In a typical
 * production configuration these hooks should all produce no code at all.
 */
0116523c 1394static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
d56791b3 1395{
53128245 1396 ptr = kasan_kmalloc_large(ptr, size, flags);
a2f77575 1397 /* As ptr might get tagged, call kmemleak hook after KASAN. */
d56791b3 1398 kmemleak_alloc(ptr, size, 1, flags);
53128245 1399 return ptr;
d56791b3
RB
1400}
1401
ee3ce779 1402static __always_inline void kfree_hook(void *x)
d56791b3
RB
1403{
1404 kmemleak_free(x);
ee3ce779 1405 kasan_kfree_large(x, _RET_IP_);
d56791b3
RB
1406}
1407
c3895391 1408static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
d56791b3
RB
1409{
1410 kmemleak_free_recursive(x, s->flags);
7d550c56 1411
02e72cc6
AR
	/*
	 * Trouble is that we may no longer disable interrupts in the fast path.
	 * So in order to make the debug calls that expect irqs to be
	 * disabled we need to disable interrupts temporarily.
	 */
4675ff05 1417#ifdef CONFIG_LOCKDEP
02e72cc6
AR
1418 {
1419 unsigned long flags;
1420
1421 local_irq_save(flags);
02e72cc6
AR
1422 debug_check_no_locks_freed(x, s->object_size);
1423 local_irq_restore(flags);
1424 }
1425#endif
1426 if (!(s->flags & SLAB_DEBUG_OBJECTS))
1427 debug_check_no_obj_freed(x, s->object_size);
0316bec2 1428
c3895391
AK
1429 /* KASAN might put x into memory quarantine, delaying its reuse */
1430 return kasan_slab_free(s, x, _RET_IP_);
02e72cc6 1431}
205ab99d 1432
c3895391
AK
1433static inline bool slab_free_freelist_hook(struct kmem_cache *s,
1434 void **head, void **tail)
81084651
JDB
1435{
1436/*
1437 * Compiler cannot detect this function can be removed if slab_free_hook()
1438 * evaluates to nothing. Thus, catch all relevant config debug options here.
1439 */
4675ff05 1440#if defined(CONFIG_LOCKDEP) || \
81084651
JDB
1441 defined(CONFIG_DEBUG_KMEMLEAK) || \
1442 defined(CONFIG_DEBUG_OBJECTS_FREE) || \
1443 defined(CONFIG_KASAN)
1444
c3895391
AK
1445 void *object;
1446 void *next = *head;
1447 void *old_tail = *tail ? *tail : *head;
1448
1449 /* Head and tail of the reconstructed freelist */
1450 *head = NULL;
1451 *tail = NULL;
81084651
JDB
1452
1453 do {
c3895391
AK
1454 object = next;
1455 next = get_freepointer(s, object);
1456 /* If object's reuse doesn't have to be delayed */
1457 if (!slab_free_hook(s, object)) {
1458 /* Move object to the new freelist */
1459 set_freepointer(s, object, *head);
1460 *head = object;
1461 if (!*tail)
1462 *tail = object;
1463 }
1464 } while (object != old_tail);
1465
1466 if (*head == *tail)
1467 *tail = NULL;
1468
1469 return *head != NULL;
1470#else
1471 return true;
81084651
JDB
1472#endif
1473}
1474
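/*
 * Illustrative sketch (not part of SLUB): the list-rebuilding idea used by
 * slab_free_freelist_hook() above, modelled on a plain singly linked list.
 * filter_list() is hypothetical: it walks the old chain and relinks only the
 * nodes that keep() accepts, so the resulting list may be shorter or empty.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *next;
};

static struct node *filter_list(struct node *old_head,
				bool (*keep)(struct node *))
{
	struct node *new_head = NULL;

	while (old_head) {
		struct node *next = old_head->next;

		if (keep(old_head)) {
			old_head->next = new_head;	/* push onto the new list */
			new_head = old_head;
		}
		old_head = next;
	}
	return new_head;		/* NULL if everything was filtered out */
}
#endif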
4d176711 1475static void *setup_object(struct kmem_cache *s, struct page *page,
588f8ba9
TG
1476 void *object)
1477{
1478 setup_object_debug(s, page, object);
4d176711 1479 object = kasan_init_slab_obj(s, object);
588f8ba9
TG
1480 if (unlikely(s->ctor)) {
1481 kasan_unpoison_object_data(s, object);
1482 s->ctor(object);
1483 kasan_poison_object_data(s, object);
1484 }
4d176711 1485 return object;
588f8ba9
TG
1486}
1487
81819f0f
CL
1488/*
1489 * Slab allocation and freeing
1490 */
5dfb4175
VD
1491static inline struct page *alloc_slab_page(struct kmem_cache *s,
1492 gfp_t flags, int node, struct kmem_cache_order_objects oo)
65c3376a 1493{
5dfb4175 1494 struct page *page;
19af27af 1495 unsigned int order = oo_order(oo);
65c3376a 1496
2154a336 1497 if (node == NUMA_NO_NODE)
5dfb4175 1498 page = alloc_pages(flags, order);
65c3376a 1499 else
96db800f 1500 page = __alloc_pages_node(node, flags, order);
5dfb4175 1501
f3ccb2c4
VD
1502 if (page && memcg_charge_slab(page, flags, order, s)) {
1503 __free_pages(page, order);
1504 page = NULL;
1505 }
5dfb4175
VD
1506
1507 return page;
65c3376a
CL
1508}
1509
210e7a43
TG
1510#ifdef CONFIG_SLAB_FREELIST_RANDOM
1511/* Pre-initialize the random sequence cache */
1512static int init_cache_random_seq(struct kmem_cache *s)
1513{
19af27af 1514 unsigned int count = oo_objects(s->oo);
210e7a43 1515 int err;
210e7a43 1516
a810007a
SR
1517 /* Bailout if already initialised */
1518 if (s->random_seq)
1519 return 0;
1520
210e7a43
TG
1521 err = cache_random_seq_create(s, count, GFP_KERNEL);
1522 if (err) {
1523 pr_err("SLUB: Unable to initialize free list for %s\n",
1524 s->name);
1525 return err;
1526 }
1527
1528 /* Transform to an offset on the set of pages */
1529 if (s->random_seq) {
19af27af
AD
1530 unsigned int i;
1531
210e7a43
TG
1532 for (i = 0; i < count; i++)
1533 s->random_seq[i] *= s->size;
1534 }
1535 return 0;
1536}
1537
1538/* Initialize each random sequence freelist per cache */
1539static void __init init_freelist_randomization(void)
1540{
1541 struct kmem_cache *s;
1542
1543 mutex_lock(&slab_mutex);
1544
1545 list_for_each_entry(s, &slab_caches, list)
1546 init_cache_random_seq(s);
1547
1548 mutex_unlock(&slab_mutex);
1549}
1550
1551/* Get the next entry on the pre-computed freelist randomized */
1552static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
1553 unsigned long *pos, void *start,
1554 unsigned long page_limit,
1555 unsigned long freelist_count)
1556{
1557 unsigned int idx;
1558
1559 /*
1560 * If the target page allocation failed, the number of objects on the
1561 * page might be smaller than the usual size defined by the cache.
1562 */
1563 do {
1564 idx = s->random_seq[*pos];
1565 *pos += 1;
1566 if (*pos >= freelist_count)
1567 *pos = 0;
1568 } while (unlikely(idx >= page_limit));
1569
1570 return (char *)start + idx;
1571}
1572
1573/* Shuffle the single linked freelist based on a random pre-computed sequence */
1574static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1575{
1576 void *start;
1577 void *cur;
1578 void *next;
1579 unsigned long idx, pos, page_limit, freelist_count;
1580
1581 if (page->objects < 2 || !s->random_seq)
1582 return false;
1583
1584 freelist_count = oo_objects(s->oo);
1585 pos = get_random_int() % freelist_count;
1586
1587 page_limit = page->objects * s->size;
1588 start = fixup_red_left(s, page_address(page));
1589
1590 /* First entry is used as the base of the freelist */
1591 cur = next_freelist_entry(s, page, &pos, start, page_limit,
1592 freelist_count);
4d176711 1593 cur = setup_object(s, page, cur);
210e7a43
TG
1594 page->freelist = cur;
1595
1596 for (idx = 1; idx < page->objects; idx++) {
210e7a43
TG
1597 next = next_freelist_entry(s, page, &pos, start, page_limit,
1598 freelist_count);
4d176711 1599 next = setup_object(s, page, next);
210e7a43
TG
1600 set_freepointer(s, cur, next);
1601 cur = next;
1602 }
210e7a43
TG
1603 set_freepointer(s, cur, NULL);
1604
1605 return true;
1606}
1607#else
1608static inline int init_cache_random_seq(struct kmem_cache *s)
1609{
1610 return 0;
1611}
1612static inline void init_freelist_randomization(void) { }
1613static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1614{
1615 return false;
1616}
1617#endif /* CONFIG_SLAB_FREELIST_RANDOM */
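/*
 * Illustrative sketch (not part of SLUB): consuming a pre-computed random
 * permutation the way next_freelist_entry()/shuffle_freelist() above do.
 * next_random_offset() is hypothetical: it walks seq[] circularly from a
 * random start and skips entries that do not fit on the current page.
 */
#if 0
#include <stddef.h>

static size_t next_random_offset(const size_t *seq, size_t seq_len,
				 size_t *pos, size_t page_limit)
{
	size_t idx;

	do {
		idx = seq[*pos];
		*pos = (*pos + 1) % seq_len;	/* wrap around the sequence */
	} while (idx >= page_limit);		/* page may hold fewer objects */

	return idx;
}
#endif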
1618
81819f0f
CL
1619static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1620{
06428780 1621 struct page *page;
834f3d11 1622 struct kmem_cache_order_objects oo = s->oo;
ba52270d 1623 gfp_t alloc_gfp;
4d176711 1624 void *start, *p, *next;
588f8ba9 1625 int idx, order;
210e7a43 1626 bool shuffle;
81819f0f 1627
7e0528da
CL
1628 flags &= gfp_allowed_mask;
1629
d0164adc 1630 if (gfpflags_allow_blocking(flags))
7e0528da
CL
1631 local_irq_enable();
1632
b7a49f0d 1633 flags |= s->allocflags;
e12ba74d 1634
ba52270d
PE
	/*
	 * Let the initial higher-order allocation fail under memory pressure
	 * so we fall back to the minimum order allocation.
	 */
1639 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
d0164adc 1640 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
444eb2a4 1641 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
ba52270d 1642
5dfb4175 1643 page = alloc_slab_page(s, alloc_gfp, node, oo);
65c3376a
CL
1644 if (unlikely(!page)) {
1645 oo = s->min;
80c3a998 1646 alloc_gfp = flags;
65c3376a
CL
1647 /*
1648 * Allocation may have failed due to fragmentation.
1649 * Try a lower order alloc if possible
1650 */
5dfb4175 1651 page = alloc_slab_page(s, alloc_gfp, node, oo);
588f8ba9
TG
1652 if (unlikely(!page))
1653 goto out;
1654 stat(s, ORDER_FALLBACK);
65c3376a 1655 }
5a896d9e 1656
834f3d11 1657 page->objects = oo_objects(oo);
81819f0f 1658
1f458cbf 1659 order = compound_order(page);
1b4f59e3 1660 page->slab_cache = s;
c03f94cc 1661 __SetPageSlab(page);
2f064f34 1662 if (page_is_pfmemalloc(page))
072bb0aa 1663 SetPageSlabPfmemalloc(page);
81819f0f 1664
a7101224 1665 kasan_poison_slab(page);
81819f0f 1666
a7101224 1667 start = page_address(page);
81819f0f 1668
a7101224 1669 setup_page_debug(s, start, order);
0316bec2 1670
210e7a43
TG
1671 shuffle = shuffle_freelist(s, page);
1672
1673 if (!shuffle) {
4d176711
AK
1674 start = fixup_red_left(s, start);
1675 start = setup_object(s, page, start);
1676 page->freelist = start;
18e50661
AK
1677 for (idx = 0, p = start; idx < page->objects - 1; idx++) {
1678 next = p + s->size;
1679 next = setup_object(s, page, next);
1680 set_freepointer(s, p, next);
1681 p = next;
1682 }
1683 set_freepointer(s, p, NULL);
81819f0f 1684 }
81819f0f 1685
e6e82ea1 1686 page->inuse = page->objects;
8cb0a506 1687 page->frozen = 1;
588f8ba9 1688
81819f0f 1689out:
d0164adc 1690 if (gfpflags_allow_blocking(flags))
588f8ba9
TG
1691 local_irq_disable();
1692 if (!page)
1693 return NULL;
1694
7779f212 1695 mod_lruvec_page_state(page,
588f8ba9
TG
1696 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1697 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1698 1 << oo_order(oo));
1699
1700 inc_slabs_node(s, page_to_nid(page), page->objects);
1701
81819f0f
CL
1702 return page;
1703}
1704
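/*
 * Illustrative sketch (not part of SLUB): the "try the preferred order,
 * fall back to the minimum order" pattern used by allocate_slab() above,
 * with all gfp details stripped out. try_alloc() is a hypothetical
 * allocator that may fail under memory pressure.
 */
#if 0
void *try_alloc(unsigned int order);

static void *alloc_with_fallback(unsigned int preferred_order,
				 unsigned int min_order)
{
	void *p = try_alloc(preferred_order);	/* opportunistic high-order try */

	if (!p && min_order < preferred_order)
		p = try_alloc(min_order);	/* smaller slabs, better chances */

	return p;				/* still NULL if even that failed */
}
#endif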
588f8ba9
TG
1705static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1706{
1707 if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
bacdcb34 1708 gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
72baeef0
MH
1709 flags &= ~GFP_SLAB_BUG_MASK;
1710 pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
1711 invalid_mask, &invalid_mask, flags, &flags);
65b9de75 1712 dump_stack();
588f8ba9
TG
1713 }
1714
1715 return allocate_slab(s,
1716 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1717}
1718
81819f0f
CL
1719static void __free_slab(struct kmem_cache *s, struct page *page)
1720{
834f3d11
CL
1721 int order = compound_order(page);
1722 int pages = 1 << order;
81819f0f 1723
becfda68 1724 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
81819f0f
CL
1725 void *p;
1726
1727 slab_pad_check(s, page);
224a88be
CL
1728 for_each_object(p, s, page_address(page),
1729 page->objects)
f7cb1933 1730 check_object(s, page, p, SLUB_RED_INACTIVE);
81819f0f
CL
1731 }
1732
7779f212 1733 mod_lruvec_page_state(page,
81819f0f
CL
1734 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1735 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
06428780 1736 -pages);
81819f0f 1737
072bb0aa 1738 __ClearPageSlabPfmemalloc(page);
49bd5221 1739 __ClearPageSlab(page);
1f458cbf 1740
d4fc5069 1741 page->mapping = NULL;
1eb5ac64
NP
1742 if (current->reclaim_state)
1743 current->reclaim_state->reclaimed_slab += pages;
27ee57c9
VD
1744 memcg_uncharge_slab(page, order, s);
1745 __free_pages(page, order);
81819f0f
CL
1746}
1747
1748static void rcu_free_slab(struct rcu_head *h)
1749{
bf68c214 1750 struct page *page = container_of(h, struct page, rcu_head);
da9a638c 1751
1b4f59e3 1752 __free_slab(page->slab_cache, page);
81819f0f
CL
1753}
1754
1755static void free_slab(struct kmem_cache *s, struct page *page)
1756{
5f0d5a3a 1757 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
bf68c214 1758 call_rcu(&page->rcu_head, rcu_free_slab);
81819f0f
CL
1759 } else
1760 __free_slab(s, page);
1761}
1762
1763static void discard_slab(struct kmem_cache *s, struct page *page)
1764{
205ab99d 1765 dec_slabs_node(s, page_to_nid(page), page->objects);
81819f0f
CL
1766 free_slab(s, page);
1767}
1768
1769/*
5cc6eee8 1770 * Management of partially allocated slabs.
81819f0f 1771 */
1e4dd946
SR
1772static inline void
1773__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
81819f0f 1774{
e95eed57 1775 n->nr_partial++;
136333d1 1776 if (tail == DEACTIVATE_TO_TAIL)
7c2e132c
CL
1777 list_add_tail(&page->lru, &n->partial);
1778 else
1779 list_add(&page->lru, &n->partial);
81819f0f
CL
1780}
1781
1e4dd946
SR
1782static inline void add_partial(struct kmem_cache_node *n,
1783 struct page *page, int tail)
62e346a8 1784{
c65c1877 1785 lockdep_assert_held(&n->list_lock);
1e4dd946
SR
1786 __add_partial(n, page, tail);
1787}
c65c1877 1788
1e4dd946
SR
1789static inline void remove_partial(struct kmem_cache_node *n,
1790 struct page *page)
1791{
1792 lockdep_assert_held(&n->list_lock);
52b4b950
DS
1793 list_del(&page->lru);
1794 n->nr_partial--;
1e4dd946
SR
1795}
1796
81819f0f 1797/*
7ced3719
CL
1798 * Remove slab from the partial list, freeze it and
1799 * return the pointer to the freelist.
81819f0f 1800 *
497b66f2 1801 * Returns a list of objects or NULL if it fails.
81819f0f 1802 */
497b66f2 1803static inline void *acquire_slab(struct kmem_cache *s,
acd19fd1 1804 struct kmem_cache_node *n, struct page *page,
633b0764 1805 int mode, int *objects)
81819f0f 1806{
2cfb7455
CL
1807 void *freelist;
1808 unsigned long counters;
1809 struct page new;
1810
c65c1877
PZ
1811 lockdep_assert_held(&n->list_lock);
1812
2cfb7455
CL
1813 /*
1814 * Zap the freelist and set the frozen bit.
1815 * The old freelist is the list of objects for the
1816 * per cpu allocation list.
1817 */
7ced3719
CL
1818 freelist = page->freelist;
1819 counters = page->counters;
1820 new.counters = counters;
633b0764 1821 *objects = new.objects - new.inuse;
23910c50 1822 if (mode) {
7ced3719 1823 new.inuse = page->objects;
23910c50
PE
1824 new.freelist = NULL;
1825 } else {
1826 new.freelist = freelist;
1827 }
2cfb7455 1828
a0132ac0 1829 VM_BUG_ON(new.frozen);
7ced3719 1830 new.frozen = 1;
2cfb7455 1831
7ced3719 1832 if (!__cmpxchg_double_slab(s, page,
2cfb7455 1833 freelist, counters,
02d7633f 1834 new.freelist, new.counters,
7ced3719 1835 "acquire_slab"))
7ced3719 1836 return NULL;
2cfb7455
CL
1837
1838 remove_partial(n, page);
7ced3719 1839 WARN_ON(!freelist);
49e22585 1840 return freelist;
81819f0f
CL
1841}
1842
633b0764 1843static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
8ba00bb6 1844static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
49e22585 1845
81819f0f 1846/*
672bba3a 1847 * Try to allocate a partial slab from a specific node.
81819f0f 1848 */
8ba00bb6
JK
1849static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1850 struct kmem_cache_cpu *c, gfp_t flags)
81819f0f 1851{
49e22585
CL
1852 struct page *page, *page2;
1853 void *object = NULL;
e5d9998f 1854 unsigned int available = 0;
633b0764 1855 int objects;
81819f0f
CL
1856
1857 /*
1858 * Racy check. If we mistakenly see no partial slabs then we
1859 * just allocate an empty slab. If we mistakenly try to get a
672bba3a
CL
1860 * partial slab and there is none available then get_partial_node()
1861 * will return NULL.
81819f0f
CL
1862 */
1863 if (!n || !n->nr_partial)
1864 return NULL;
1865
1866 spin_lock(&n->list_lock);
49e22585 1867 list_for_each_entry_safe(page, page2, &n->partial, lru) {
8ba00bb6 1868 void *t;
49e22585 1869
8ba00bb6
JK
1870 if (!pfmemalloc_match(page, flags))
1871 continue;
1872
633b0764 1873 t = acquire_slab(s, n, page, object == NULL, &objects);
49e22585
CL
1874 if (!t)
1875 break;
1876
633b0764 1877 available += objects;
12d79634 1878 if (!object) {
49e22585 1879 c->page = page;
49e22585 1880 stat(s, ALLOC_FROM_PARTIAL);
49e22585 1881 object = t;
49e22585 1882 } else {
633b0764 1883 put_cpu_partial(s, page, 0);
8028dcea 1884 stat(s, CPU_PARTIAL_NODE);
49e22585 1885 }
345c905d 1886 if (!kmem_cache_has_cpu_partial(s)
e6d0e1dc 1887 || available > slub_cpu_partial(s) / 2)
49e22585
CL
1888 break;
1889
497b66f2 1890 }
81819f0f 1891 spin_unlock(&n->list_lock);
497b66f2 1892 return object;
81819f0f
CL
1893}
1894
1895/*
672bba3a 1896 * Get a page from somewhere. Search in increasing NUMA distances.
81819f0f 1897 */
de3ec035 1898static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
acd19fd1 1899 struct kmem_cache_cpu *c)
81819f0f
CL
1900{
1901#ifdef CONFIG_NUMA
1902 struct zonelist *zonelist;
dd1a239f 1903 struct zoneref *z;
54a6eb5c
MG
1904 struct zone *zone;
1905 enum zone_type high_zoneidx = gfp_zone(flags);
497b66f2 1906 void *object;
cc9a6c87 1907 unsigned int cpuset_mems_cookie;
81819f0f
CL
1908
1909 /*
672bba3a
CL
1910 * The defrag ratio allows a configuration of the tradeoffs between
1911 * inter node defragmentation and node local allocations. A lower
1912 * defrag_ratio increases the tendency to do local allocations
1913 * instead of attempting to obtain partial slabs from other nodes.
81819f0f 1914 *
672bba3a
CL
1915 * If the defrag_ratio is set to 0 then kmalloc() always
1916 * returns node local objects. If the ratio is higher then kmalloc()
1917 * may return off node objects because partial slabs are obtained
1918 * from other nodes and filled up.
81819f0f 1919 *
43efd3ea
LP
1920 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
1921 * (which makes defrag_ratio = 1000) then every (well almost)
1922 * allocation will first attempt to defrag slab caches on other nodes.
1923 * This means scanning over all nodes to look for partial slabs which
1924 * may be expensive if we do it every time we are trying to find a slab
672bba3a 1925 * with available objects.
81819f0f 1926 */
9824601e
CL
1927 if (!s->remote_node_defrag_ratio ||
1928 get_cycles() % 1024 > s->remote_node_defrag_ratio)
81819f0f
CL
1929 return NULL;
1930
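	/*
	 * Worked example (illustration only, not part of the original source):
	 * writing 30 to /sys/kernel/slab/<cache>/remote_node_defrag_ratio
	 * stores 300 in s->remote_node_defrag_ratio. Since get_cycles() % 1024
	 * is roughly uniform, the check above bails out for about 70% of
	 * calls, so only ~30% of allocations that miss locally go on to scan
	 * remote nodes for partial slabs.
	 */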
cc9a6c87 1931 do {
d26914d1 1932 cpuset_mems_cookie = read_mems_allowed_begin();
2a389610 1933 zonelist = node_zonelist(mempolicy_slab_node(), flags);
cc9a6c87
MG
1934 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1935 struct kmem_cache_node *n;
1936
1937 n = get_node(s, zone_to_nid(zone));
1938
dee2f8aa 1939 if (n && cpuset_zone_allowed(zone, flags) &&
cc9a6c87 1940 n->nr_partial > s->min_partial) {
8ba00bb6 1941 object = get_partial_node(s, n, c, flags);
cc9a6c87
MG
1942 if (object) {
1943 /*
d26914d1
MG
1944 * Don't check read_mems_allowed_retry()
1945 * here - if mems_allowed was updated in
1946 * parallel, that was a harmless race
1947 * between allocation and the cpuset
1948 * update
cc9a6c87 1949 */
cc9a6c87
MG
1950 return object;
1951 }
c0ff7453 1952 }
81819f0f 1953 }
d26914d1 1954 } while (read_mems_allowed_retry(cpuset_mems_cookie));
81819f0f
CL
1955#endif
1956 return NULL;
1957}
1958
1959/*
1960 * Get a partial page, lock it and return it.
1961 */
497b66f2 1962static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
acd19fd1 1963 struct kmem_cache_cpu *c)
81819f0f 1964{
497b66f2 1965 void *object;
a561ce00
JK
1966 int searchnode = node;
1967
1968 if (node == NUMA_NO_NODE)
1969 searchnode = numa_mem_id();
1970 else if (!node_present_pages(node))
1971 searchnode = node_to_mem_node(node);
81819f0f 1972
8ba00bb6 1973 object = get_partial_node(s, get_node(s, searchnode), c, flags);
497b66f2
CL
1974 if (object || node != NUMA_NO_NODE)
1975 return object;
81819f0f 1976
acd19fd1 1977 return get_any_partial(s, flags, c);
81819f0f
CL
1978}
1979
8a5ec0ba
CL
1980#ifdef CONFIG_PREEMPT
1981/*
1982 * Calculate the next globally unique transaction for disambiguation
1983 * during cmpxchg. The transactions start with the cpu number and are then
1984 * incremented by TID_STEP (CONFIG_NR_CPUS rounded up to a power of two).
1985 */
1986#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
1987#else
1988/*
1989 * No preemption supported therefore also no need to check for
1990 * different cpus.
1991 */
1992#define TID_STEP 1
1993#endif
1994
1995static inline unsigned long next_tid(unsigned long tid)
1996{
1997 return tid + TID_STEP;
1998}
1999
2000static inline unsigned int tid_to_cpu(unsigned long tid)
2001{
2002 return tid % TID_STEP;
2003}
2004
2005static inline unsigned long tid_to_event(unsigned long tid)
2006{
2007 return tid / TID_STEP;
2008}
2009
2010static inline unsigned int init_tid(int cpu)
2011{
2012 return cpu;
2013}
2014
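/*
 * Worked example (illustration only, not part of the original source): with
 * CONFIG_PREEMPT and CONFIG_NR_CPUS=6, TID_STEP = roundup_pow_of_two(6) = 8.
 * The cpu slab of cpu 3 starts at tid = init_tid(3) = 3 and advances
 * 3 -> 11 -> 19 -> ... through next_tid(). For tid = 19:
 *	tid_to_cpu(19)   = 19 % 8 = 3	(still cpu 3)
 *	tid_to_event(19) = 19 / 8 = 2	(two operations completed)
 * A cmpxchg that captured tid 19 but finds 27 in the per cpu area knows that
 * another operation ran in between and retries.
 */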
2015static inline void note_cmpxchg_failure(const char *n,
2016 const struct kmem_cache *s, unsigned long tid)
2017{
2018#ifdef SLUB_DEBUG_CMPXCHG
2019 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
2020
f9f58285 2021 pr_info("%s %s: cmpxchg redo ", n, s->name);
8a5ec0ba
CL
2022
2023#ifdef CONFIG_PREEMPT
2024 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
f9f58285 2025 pr_warn("due to cpu change %d -> %d\n",
8a5ec0ba
CL
2026 tid_to_cpu(tid), tid_to_cpu(actual_tid));
2027 else
2028#endif
2029 if (tid_to_event(tid) != tid_to_event(actual_tid))
f9f58285 2030 pr_warn("due to cpu running other code. Event %ld->%ld\n",
8a5ec0ba
CL
2031 tid_to_event(tid), tid_to_event(actual_tid));
2032 else
f9f58285 2033 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
8a5ec0ba
CL
2034 actual_tid, tid, next_tid(tid));
2035#endif
4fdccdfb 2036 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
8a5ec0ba
CL
2037}
2038
788e1aad 2039static void init_kmem_cache_cpus(struct kmem_cache *s)
8a5ec0ba 2040{
8a5ec0ba
CL
2041 int cpu;
2042
2043 for_each_possible_cpu(cpu)
2044 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
8a5ec0ba 2045}
2cfb7455 2046
81819f0f
CL
2047/*
2048 * Remove the cpu slab
2049 */
d0e0ac97 2050static void deactivate_slab(struct kmem_cache *s, struct page *page,
d4ff6d35 2051 void *freelist, struct kmem_cache_cpu *c)
81819f0f 2052{
2cfb7455 2053 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2cfb7455
CL
2054 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
2055 int lock = 0;
2056 enum slab_modes l = M_NONE, m = M_NONE;
2cfb7455 2057 void *nextfree;
136333d1 2058 int tail = DEACTIVATE_TO_HEAD;
2cfb7455
CL
2059 struct page new;
2060 struct page old;
2061
2062 if (page->freelist) {
84e554e6 2063 stat(s, DEACTIVATE_REMOTE_FREES);
136333d1 2064 tail = DEACTIVATE_TO_TAIL;
2cfb7455
CL
2065 }
2066
894b8788 2067 /*
2cfb7455
CL
2068 * Stage one: Free all available per cpu objects back
2069 * to the page freelist while it is still frozen. Leave the
2070 * last one.
2071 *
2072 * There is no need to take the list->lock because the page
2073 * is still frozen.
2074 */
2075 while (freelist && (nextfree = get_freepointer(s, freelist))) {
2076 void *prior;
2077 unsigned long counters;
2078
2079 do {
2080 prior = page->freelist;
2081 counters = page->counters;
2082 set_freepointer(s, freelist, prior);
2083 new.counters = counters;
2084 new.inuse--;
a0132ac0 2085 VM_BUG_ON(!new.frozen);
2cfb7455 2086
1d07171c 2087 } while (!__cmpxchg_double_slab(s, page,
2cfb7455
CL
2088 prior, counters,
2089 freelist, new.counters,
2090 "drain percpu freelist"));
2091
2092 freelist = nextfree;
2093 }
2094
894b8788 2095 /*
2cfb7455
CL
2096 * Stage two: Ensure that the page is unfrozen while the
2097 * list presence reflects the actual number of objects
2098 * during unfreeze.
2099 *
2100 * We setup the list membership and then perform a cmpxchg
2101 * with the count. If there is a mismatch then the page
2102 * is not unfrozen but the page is on the wrong list.
2103 *
2104 * Then we restart the process which may have to remove
2105 * the page from the list that we just put it on again
2106 * because the number of objects in the slab may have
2107 * changed.
894b8788 2108 */
2cfb7455 2109redo:
894b8788 2110
2cfb7455
CL
2111 old.freelist = page->freelist;
2112 old.counters = page->counters;
a0132ac0 2113 VM_BUG_ON(!old.frozen);
7c2e132c 2114
2cfb7455
CL
2115 /* Determine target state of the slab */
2116 new.counters = old.counters;
2117 if (freelist) {
2118 new.inuse--;
2119 set_freepointer(s, freelist, old.freelist);
2120 new.freelist = freelist;
2121 } else
2122 new.freelist = old.freelist;
2123
2124 new.frozen = 0;
2125
8a5b20ae 2126 if (!new.inuse && n->nr_partial >= s->min_partial)
2cfb7455
CL
2127 m = M_FREE;
2128 else if (new.freelist) {
2129 m = M_PARTIAL;
2130 if (!lock) {
2131 lock = 1;
2132 /*
2133 * Taking the spinlock removes the possibility
2134 * that acquire_slab() will see a slab page that
2135 * is frozen
2136 */
2137 spin_lock(&n->list_lock);
2138 }
2139 } else {
2140 m = M_FULL;
2141 if (kmem_cache_debug(s) && !lock) {
2142 lock = 1;
2143 /*
2144 * This also ensures that the scanning of full
2145 * slabs from diagnostic functions will not see
2146 * any frozen slabs.
2147 */
2148 spin_lock(&n->list_lock);
2149 }
2150 }
2151
2152 if (l != m) {
2cfb7455 2153 if (l == M_PARTIAL)
2cfb7455 2154 remove_partial(n, page);
2cfb7455 2155 else if (l == M_FULL)
c65c1877 2156 remove_full(s, n, page);
2cfb7455 2157
88349a28 2158 if (m == M_PARTIAL)
2cfb7455 2159 add_partial(n, page, tail);
88349a28 2160 else if (m == M_FULL)
2cfb7455 2161 add_full(s, n, page);
2cfb7455
CL
2162 }
2163
2164 l = m;
1d07171c 2165 if (!__cmpxchg_double_slab(s, page,
2cfb7455
CL
2166 old.freelist, old.counters,
2167 new.freelist, new.counters,
2168 "unfreezing slab"))
2169 goto redo;
2170
2cfb7455
CL
2171 if (lock)
2172 spin_unlock(&n->list_lock);
2173
88349a28
WY
2174 if (m == M_PARTIAL)
2175 stat(s, tail);
2176 else if (m == M_FULL)
2177 stat(s, DEACTIVATE_FULL);
2178 else if (m == M_FREE) {
2cfb7455
CL
2179 stat(s, DEACTIVATE_EMPTY);
2180 discard_slab(s, page);
2181 stat(s, FREE_SLAB);
894b8788 2182 }
d4ff6d35
WY
2183
2184 c->page = NULL;
2185 c->freelist = NULL;
81819f0f
CL
2186}
2187
d24ac77f
JK
2188/*
2189 * Unfreeze all the cpu partial slabs.
2190 *
59a09917
CL
2191 * This function must be called with interrupts disabled
2192 * for the cpu using c (or some other mechanism must be in place
2193 * to guarantee no concurrent accesses).
d24ac77f 2194 */
59a09917
CL
2195static void unfreeze_partials(struct kmem_cache *s,
2196 struct kmem_cache_cpu *c)
49e22585 2197{
345c905d 2198#ifdef CONFIG_SLUB_CPU_PARTIAL
43d77867 2199 struct kmem_cache_node *n = NULL, *n2 = NULL;
9ada1934 2200 struct page *page, *discard_page = NULL;
49e22585
CL
2201
2202 while ((page = c->partial)) {
49e22585
CL
2203 struct page new;
2204 struct page old;
2205
2206 c->partial = page->next;
43d77867
JK
2207
2208 n2 = get_node(s, page_to_nid(page));
2209 if (n != n2) {
2210 if (n)
2211 spin_unlock(&n->list_lock);
2212
2213 n = n2;
2214 spin_lock(&n->list_lock);
2215 }
49e22585
CL
2216
2217 do {
2218
2219 old.freelist = page->freelist;
2220 old.counters = page->counters;
a0132ac0 2221 VM_BUG_ON(!old.frozen);
49e22585
CL
2222
2223 new.counters = old.counters;
2224 new.freelist = old.freelist;
2225
2226 new.frozen = 0;
2227
d24ac77f 2228 } while (!__cmpxchg_double_slab(s, page,
49e22585
CL
2229 old.freelist, old.counters,
2230 new.freelist, new.counters,
2231 "unfreezing slab"));
2232
8a5b20ae 2233 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
9ada1934
SL
2234 page->next = discard_page;
2235 discard_page = page;
43d77867
JK
2236 } else {
2237 add_partial(n, page, DEACTIVATE_TO_TAIL);
2238 stat(s, FREE_ADD_PARTIAL);
49e22585
CL
2239 }
2240 }
2241
2242 if (n)
2243 spin_unlock(&n->list_lock);
9ada1934
SL
2244
2245 while (discard_page) {
2246 page = discard_page;
2247 discard_page = discard_page->next;
2248
2249 stat(s, DEACTIVATE_EMPTY);
2250 discard_slab(s, page);
2251 stat(s, FREE_SLAB);
2252 }
345c905d 2253#endif
49e22585
CL
2254}
2255
2256/*
2257 * Put a page that was just frozen (in __slab_free) into a partial page
0d2d5d40 2258 * slot if available.
49e22585
CL
2259 *
2260 * If we did not find a slot then simply move all the partials to the
2261 * per node partial list.
2262 */
633b0764 2263static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
49e22585 2264{
345c905d 2265#ifdef CONFIG_SLUB_CPU_PARTIAL
49e22585
CL
2266 struct page *oldpage;
2267 int pages;
2268 int pobjects;
2269
d6e0b7fa 2270 preempt_disable();
49e22585
CL
2271 do {
2272 pages = 0;
2273 pobjects = 0;
2274 oldpage = this_cpu_read(s->cpu_slab->partial);
2275
2276 if (oldpage) {
2277 pobjects = oldpage->pobjects;
2278 pages = oldpage->pages;
2279 if (drain && pobjects > s->cpu_partial) {
2280 unsigned long flags;
2281 /*
2282 * partial array is full. Move the existing
2283 * set to the per node partial list.
2284 */
2285 local_irq_save(flags);
59a09917 2286 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
49e22585 2287 local_irq_restore(flags);
e24fc410 2288 oldpage = NULL;
49e22585
CL
2289 pobjects = 0;
2290 pages = 0;
8028dcea 2291 stat(s, CPU_PARTIAL_DRAIN);
49e22585
CL
2292 }
2293 }
2294
2295 pages++;
2296 pobjects += page->objects - page->inuse;
2297
2298 page->pages = pages;
2299 page->pobjects = pobjects;
2300 page->next = oldpage;
2301
d0e0ac97
CG
2302 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2303 != oldpage);
d6e0b7fa
VD
2304 if (unlikely(!s->cpu_partial)) {
2305 unsigned long flags;
2306
2307 local_irq_save(flags);
2308 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2309 local_irq_restore(flags);
2310 }
2311 preempt_enable();
345c905d 2312#endif
49e22585
CL
2313}
2314
dfb4f096 2315static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
81819f0f 2316{
84e554e6 2317 stat(s, CPUSLAB_FLUSH);
d4ff6d35 2318 deactivate_slab(s, c->page, c->freelist, c);
c17dda40
CL
2319
2320 c->tid = next_tid(c->tid);
81819f0f
CL
2321}
2322
2323/*
2324 * Flush cpu slab.
6446faa2 2325 *
81819f0f
CL
2326 * Called from IPI handler with interrupts disabled.
2327 */
0c710013 2328static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
81819f0f 2329{
9dfc6e68 2330 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
81819f0f 2331
1265ef2d
WY
2332 if (c->page)
2333 flush_slab(s, c);
49e22585 2334
1265ef2d 2335 unfreeze_partials(s, c);
81819f0f
CL
2336}
2337
2338static void flush_cpu_slab(void *d)
2339{
2340 struct kmem_cache *s = d;
81819f0f 2341
dfb4f096 2342 __flush_cpu_slab(s, smp_processor_id());
81819f0f
CL
2343}
2344
a8364d55
GBY
2345static bool has_cpu_slab(int cpu, void *info)
2346{
2347 struct kmem_cache *s = info;
2348 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2349
a93cf07b 2350 return c->page || slub_percpu_partial(c);
a8364d55
GBY
2351}
2352
81819f0f
CL
2353static void flush_all(struct kmem_cache *s)
2354{
a8364d55 2355 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
81819f0f
CL
2356}
2357
a96a87bf
SAS
2358/*
2359 * Use the cpu hotplug callback to ensure that the cpu slabs are flushed when
2360 * necessary.
2361 */
2362static int slub_cpu_dead(unsigned int cpu)
2363{
2364 struct kmem_cache *s;
2365 unsigned long flags;
2366
2367 mutex_lock(&slab_mutex);
2368 list_for_each_entry(s, &slab_caches, list) {
2369 local_irq_save(flags);
2370 __flush_cpu_slab(s, cpu);
2371 local_irq_restore(flags);
2372 }
2373 mutex_unlock(&slab_mutex);
2374 return 0;
2375}
2376
dfb4f096
CL
2377/*
2378 * Check if the objects in a per cpu structure fit numa
2379 * locality expectations.
2380 */
57d437d2 2381static inline int node_match(struct page *page, int node)
dfb4f096
CL
2382{
2383#ifdef CONFIG_NUMA
6159d0f5 2384 if (node != NUMA_NO_NODE && page_to_nid(page) != node)
dfb4f096
CL
2385 return 0;
2386#endif
2387 return 1;
2388}
2389
9a02d699 2390#ifdef CONFIG_SLUB_DEBUG
781b2ba6
PE
2391static int count_free(struct page *page)
2392{
2393 return page->objects - page->inuse;
2394}
2395
9a02d699
DR
2396static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2397{
2398 return atomic_long_read(&n->total_objects);
2399}
2400#endif /* CONFIG_SLUB_DEBUG */
2401
2402#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
781b2ba6
PE
2403static unsigned long count_partial(struct kmem_cache_node *n,
2404 int (*get_count)(struct page *))
2405{
2406 unsigned long flags;
2407 unsigned long x = 0;
2408 struct page *page;
2409
2410 spin_lock_irqsave(&n->list_lock, flags);
2411 list_for_each_entry(page, &n->partial, lru)
2412 x += get_count(page);
2413 spin_unlock_irqrestore(&n->list_lock, flags);
2414 return x;
2415}
9a02d699 2416#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
26c02cf0 2417
781b2ba6
PE
2418static noinline void
2419slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2420{
9a02d699
DR
2421#ifdef CONFIG_SLUB_DEBUG
2422 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2423 DEFAULT_RATELIMIT_BURST);
781b2ba6 2424 int node;
fa45dc25 2425 struct kmem_cache_node *n;
781b2ba6 2426
9a02d699
DR
2427 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2428 return;
2429
5b3810e5
VB
2430 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2431 nid, gfpflags, &gfpflags);
19af27af 2432 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
f9f58285
FF
2433 s->name, s->object_size, s->size, oo_order(s->oo),
2434 oo_order(s->min));
781b2ba6 2435
3b0efdfa 2436 if (oo_order(s->min) > get_order(s->object_size))
f9f58285
FF
2437 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n",
2438 s->name);
fa5ec8a1 2439
fa45dc25 2440 for_each_kmem_cache_node(s, node, n) {
781b2ba6
PE
2441 unsigned long nr_slabs;
2442 unsigned long nr_objs;
2443 unsigned long nr_free;
2444
26c02cf0
AB
2445 nr_free = count_partial(n, count_free);
2446 nr_slabs = node_nr_slabs(n);
2447 nr_objs = node_nr_objs(n);
781b2ba6 2448
f9f58285 2449 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
781b2ba6
PE
2450 node, nr_slabs, nr_objs, nr_free);
2451 }
9a02d699 2452#endif
781b2ba6
PE
2453}
2454
497b66f2
CL
2455static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2456 int node, struct kmem_cache_cpu **pc)
2457{
6faa6833 2458 void *freelist;
188fd063
CL
2459 struct kmem_cache_cpu *c = *pc;
2460 struct page *page;
497b66f2 2461
128227e7
MW
2462 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
2463
188fd063 2464 freelist = get_partial(s, flags, node, c);
497b66f2 2465
188fd063
CL
2466 if (freelist)
2467 return freelist;
2468
2469 page = new_slab(s, flags, node);
497b66f2 2470 if (page) {
7c8e0181 2471 c = raw_cpu_ptr(s->cpu_slab);
497b66f2
CL
2472 if (c->page)
2473 flush_slab(s, c);
2474
2475 /*
2476 * No other reference to the page yet so we can
2477 * muck around with it freely without cmpxchg
2478 */
6faa6833 2479 freelist = page->freelist;
497b66f2
CL
2480 page->freelist = NULL;
2481
2482 stat(s, ALLOC_SLAB);
497b66f2
CL
2483 c->page = page;
2484 *pc = c;
2485 } else
6faa6833 2486 freelist = NULL;
497b66f2 2487
6faa6833 2488 return freelist;
497b66f2
CL
2489}
2490
072bb0aa
MG
2491static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2492{
2493 if (unlikely(PageSlabPfmemalloc(page)))
2494 return gfp_pfmemalloc_allowed(gfpflags);
2495
2496 return true;
2497}
2498
213eeb9f 2499/*
d0e0ac97
CG
2500 * Check the page->freelist of a page and either transfer the freelist to the
2501 * per cpu freelist or deactivate the page.
213eeb9f
CL
2502 *
2503 * The page is still frozen if the return value is not NULL.
2504 *
2505 * If this function returns NULL then the page has been unfrozen.
d24ac77f
JK
2506 *
2507 * This function must be called with interrupts disabled.
213eeb9f
CL
2508 */
2509static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2510{
2511 struct page new;
2512 unsigned long counters;
2513 void *freelist;
2514
2515 do {
2516 freelist = page->freelist;
2517 counters = page->counters;
6faa6833 2518
213eeb9f 2519 new.counters = counters;
a0132ac0 2520 VM_BUG_ON(!new.frozen);
213eeb9f
CL
2521
2522 new.inuse = page->objects;
2523 new.frozen = freelist != NULL;
2524
d24ac77f 2525 } while (!__cmpxchg_double_slab(s, page,
213eeb9f
CL
2526 freelist, counters,
2527 NULL, new.counters,
2528 "get_freelist"));
2529
2530 return freelist;
2531}
2532
81819f0f 2533/*
894b8788
CL
2534 * Slow path. The lockless freelist is empty or we need to perform
2535 * debugging duties.
2536 *
894b8788
CL
2537 * Processing is still very fast if new objects have been freed to the
2538 * regular freelist. In that case we simply take over the regular freelist
2539 * as the lockless freelist and zap the regular freelist.
81819f0f 2540 *
894b8788
CL
2541 * If that is not working then we fall back to the partial lists. We take the
2542 * first element of the freelist as the object to allocate now and move the
2543 * rest of the freelist to the lockless freelist.
81819f0f 2544 *
894b8788 2545 * And if we were unable to get a new slab from the partial slab lists then
6446faa2
CL
2546 * we need to allocate a new slab. This is the slowest path since it involves
2547 * a call to the page allocator and the setup of a new slab.
a380a3c7
CL
2548 *
2549 * Version of __slab_alloc to use when we know that interrupts are
2550 * already disabled (which is the case for bulk allocation).
81819f0f 2551 */
a380a3c7 2552static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
ce71e27c 2553 unsigned long addr, struct kmem_cache_cpu *c)
81819f0f 2554{
6faa6833 2555 void *freelist;
f6e7def7 2556 struct page *page;
81819f0f 2557
f6e7def7
CL
2558 page = c->page;
2559 if (!page)
81819f0f 2560 goto new_slab;
49e22585 2561redo:
6faa6833 2562
57d437d2 2563 if (unlikely(!node_match(page, node))) {
a561ce00
JK
2564 int searchnode = node;
2565
2566 if (node != NUMA_NO_NODE && !node_present_pages(node))
2567 searchnode = node_to_mem_node(node);
2568
2569 if (unlikely(!node_match(page, searchnode))) {
2570 stat(s, ALLOC_NODE_MISMATCH);
d4ff6d35 2571 deactivate_slab(s, page, c->freelist, c);
a561ce00
JK
2572 goto new_slab;
2573 }
fc59c053 2574 }
6446faa2 2575
072bb0aa
MG
2576 /*
2577 * By rights, we should be searching for a slab page that was
2578 * PFMEMALLOC but right now, we are losing the pfmemalloc
2579 * information when the page leaves the per-cpu allocator
2580 */
2581 if (unlikely(!pfmemalloc_match(page, gfpflags))) {
d4ff6d35 2582 deactivate_slab(s, page, c->freelist, c);
072bb0aa
MG
2583 goto new_slab;
2584 }
2585
73736e03 2586 /* must check again c->freelist in case of cpu migration or IRQ */
6faa6833
CL
2587 freelist = c->freelist;
2588 if (freelist)
73736e03 2589 goto load_freelist;
03e404af 2590
f6e7def7 2591 freelist = get_freelist(s, page);
6446faa2 2592
6faa6833 2593 if (!freelist) {
03e404af
CL
2594 c->page = NULL;
2595 stat(s, DEACTIVATE_BYPASS);
fc59c053 2596 goto new_slab;
03e404af 2597 }
6446faa2 2598
84e554e6 2599 stat(s, ALLOC_REFILL);
6446faa2 2600
894b8788 2601load_freelist:
507effea
CL
2602 /*
2603 * freelist is pointing to the list of objects to be used.
2604 * page is pointing to the page from which the objects are obtained.
2605 * That page must be frozen for per cpu allocations to work.
2606 */
a0132ac0 2607 VM_BUG_ON(!c->page->frozen);
6faa6833 2608 c->freelist = get_freepointer(s, freelist);
8a5ec0ba 2609 c->tid = next_tid(c->tid);
6faa6833 2610 return freelist;
81819f0f 2611
81819f0f 2612new_slab:
2cfb7455 2613
a93cf07b
WY
2614 if (slub_percpu_partial(c)) {
2615 page = c->page = slub_percpu_partial(c);
2616 slub_set_percpu_partial(c, page);
49e22585 2617 stat(s, CPU_PARTIAL_ALLOC);
49e22585 2618 goto redo;
81819f0f
CL
2619 }
2620
188fd063 2621 freelist = new_slab_objects(s, gfpflags, node, &c);
01ad8a7b 2622
f4697436 2623 if (unlikely(!freelist)) {
9a02d699 2624 slab_out_of_memory(s, gfpflags, node);
f4697436 2625 return NULL;
81819f0f 2626 }
2cfb7455 2627
f6e7def7 2628 page = c->page;
5091b74a 2629 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
4b6f0750 2630 goto load_freelist;
2cfb7455 2631
497b66f2 2632 /* Only entered in the debug case */
d0e0ac97
CG
2633 if (kmem_cache_debug(s) &&
2634 !alloc_debug_processing(s, page, freelist, addr))
497b66f2 2635 goto new_slab; /* Slab failed checks. Next slab needed */
894b8788 2636
d4ff6d35 2637 deactivate_slab(s, page, get_freepointer(s, freelist), c);
6faa6833 2638 return freelist;
894b8788
CL
2639}
2640
a380a3c7
CL
2641/*
2642 * Another one that disables interrupts and compensates for possible
2643 * cpu changes by refetching the per cpu area pointer.
2644 */
2645static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2646 unsigned long addr, struct kmem_cache_cpu *c)
2647{
2648 void *p;
2649 unsigned long flags;
2650
2651 local_irq_save(flags);
2652#ifdef CONFIG_PREEMPT
2653 /*
2654 * We may have been preempted and rescheduled on a different
2655 * cpu before disabling interrupts. Need to reload cpu area
2656 * pointer.
2657 */
2658 c = this_cpu_ptr(s->cpu_slab);
2659#endif
2660
2661 p = ___slab_alloc(s, gfpflags, node, addr, c);
2662 local_irq_restore(flags);
2663 return p;
2664}
2665
894b8788
CL
2666/*
2667 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2668 * have the fastpath folded into their functions. So no function call
2669 * overhead for requests that can be satisfied on the fastpath.
2670 *
2671 * The fastpath works by first checking if the lockless freelist can be used.
2672 * If not then __slab_alloc is called for slow processing.
2673 *
2674 * Otherwise we can simply pick the next object from the lockless free list.
2675 */
2b847c3c 2676static __always_inline void *slab_alloc_node(struct kmem_cache *s,
ce71e27c 2677 gfp_t gfpflags, int node, unsigned long addr)
894b8788 2678{
03ec0ed5 2679 void *object;
dfb4f096 2680 struct kmem_cache_cpu *c;
57d437d2 2681 struct page *page;
8a5ec0ba 2682 unsigned long tid;
1f84260c 2683
8135be5a
VD
2684 s = slab_pre_alloc_hook(s, gfpflags);
2685 if (!s)
773ff60e 2686 return NULL;
8a5ec0ba 2687redo:
8a5ec0ba
CL
2688 /*
2689 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2690 * enabled. We may switch back and forth between cpus while
2691 * reading from one cpu area. That does not matter as long
2692 * as we end up on the original cpu again when doing the cmpxchg.
7cccd80b 2693 *
9aabf810
JK
2694 * We should guarantee that tid and kmem_cache are retrieved on
2695 * the same cpu. They could differ if CONFIG_PREEMPT is enabled, so we need
2696 * to check whether they match.
8a5ec0ba 2697 */
9aabf810
JK
2698 do {
2699 tid = this_cpu_read(s->cpu_slab->tid);
2700 c = raw_cpu_ptr(s->cpu_slab);
859b7a0e
MR
2701 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2702 unlikely(tid != READ_ONCE(c->tid)));
9aabf810
JK
2703
2704 /*
2705 * Irqless object alloc/free algorithm used here depends on sequence
2706 * of fetching cpu_slab's data. tid should be fetched before anything
2707 * on c to guarantee that object and page associated with previous tid
2708 * won't be used with current tid. If we fetch tid first, object and
2709 * page could be one associated with next tid and our alloc/free
2710 * request will be failed. In this case, we will retry. So, no problem.
2711 */
2712 barrier();
8a5ec0ba 2713
8a5ec0ba
CL
2714 /*
2715 * The transaction ids are globally unique per cpu and per operation on
2716 * a per cpu queue. Thus they guarantee that the cmpxchg_double
2717 * occurs on the right processor and that there was no operation on the
2718 * linked list in between.
2719 */
8a5ec0ba 2720
9dfc6e68 2721 object = c->freelist;
57d437d2 2722 page = c->page;
8eae1492 2723 if (unlikely(!object || !node_match(page, node))) {
dfb4f096 2724 object = __slab_alloc(s, gfpflags, node, addr, c);
8eae1492
DH
2725 stat(s, ALLOC_SLOWPATH);
2726 } else {
0ad9500e
ED
2727 void *next_object = get_freepointer_safe(s, object);
2728
8a5ec0ba 2729 /*
25985edc 2730 * The cmpxchg will only match if there was no additional
8a5ec0ba
CL
2731 * operation and if we are on the right processor.
2732 *
d0e0ac97
CG
2733 * The cmpxchg does the following atomically (without lock
2734 * semantics!)
8a5ec0ba
CL
2735 * 1. Relocate first pointer to the current per cpu area.
2736 * 2. Verify that tid and freelist have not been changed
2737 * 3. If they were not changed replace tid and freelist
2738 *
d0e0ac97
CG
2739 * Since this is without lock semantics the protection is only
2740 * against code executing on this cpu *not* from access by
2741 * other cpus.
8a5ec0ba 2742 */
933393f5 2743 if (unlikely(!this_cpu_cmpxchg_double(
8a5ec0ba
CL
2744 s->cpu_slab->freelist, s->cpu_slab->tid,
2745 object, tid,
0ad9500e 2746 next_object, next_tid(tid)))) {
8a5ec0ba
CL
2747
2748 note_cmpxchg_failure("slab_alloc", s, tid);
2749 goto redo;
2750 }
0ad9500e 2751 prefetch_freepointer(s, next_object);
84e554e6 2752 stat(s, ALLOC_FASTPATH);
894b8788 2753 }
8a5ec0ba 2754
74e2134f 2755 if (unlikely(gfpflags & __GFP_ZERO) && object)
3b0efdfa 2756 memset(object, 0, s->object_size);
d07dbea4 2757
03ec0ed5 2758 slab_post_alloc_hook(s, gfpflags, 1, &object);
5a896d9e 2759
894b8788 2760 return object;
81819f0f
CL
2761}
2762
2b847c3c
EG
2763static __always_inline void *slab_alloc(struct kmem_cache *s,
2764 gfp_t gfpflags, unsigned long addr)
2765{
2766 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2767}
2768
81819f0f
CL
2769void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2770{
2b847c3c 2771 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
5b882be4 2772
d0e0ac97
CG
2773 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2774 s->size, gfpflags);
5b882be4
EGM
2775
2776 return ret;
81819f0f
CL
2777}
2778EXPORT_SYMBOL(kmem_cache_alloc);
2779
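/*
 * Usage sketch (illustrative only, not part of the original source; struct
 * foo and foo_cachep are hypothetical): a caller typically creates a cache
 * once and then allocates and frees objects through it.
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *	kmem_cache_destroy(foo_cachep);
 */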
0f24f128 2780#ifdef CONFIG_TRACING
4a92379b
RK
2781void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2782{
2b847c3c 2783 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
4a92379b 2784 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
0116523c 2785 ret = kasan_kmalloc(s, ret, size, gfpflags);
4a92379b
RK
2786 return ret;
2787}
2788EXPORT_SYMBOL(kmem_cache_alloc_trace);
5b882be4
EGM
2789#endif
2790
81819f0f
CL
2791#ifdef CONFIG_NUMA
2792void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2793{
2b847c3c 2794 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
5b882be4 2795
ca2b84cb 2796 trace_kmem_cache_alloc_node(_RET_IP_, ret,
3b0efdfa 2797 s->object_size, s->size, gfpflags, node);
5b882be4
EGM
2798
2799 return ret;
81819f0f
CL
2800}
2801EXPORT_SYMBOL(kmem_cache_alloc_node);
81819f0f 2802
0f24f128 2803#ifdef CONFIG_TRACING
4a92379b 2804void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
5b882be4 2805 gfp_t gfpflags,
4a92379b 2806 int node, size_t size)
5b882be4 2807{
2b847c3c 2808 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
4a92379b
RK
2809
2810 trace_kmalloc_node(_RET_IP_, ret,
2811 size, s->size, gfpflags, node);
0316bec2 2812
0116523c 2813 ret = kasan_kmalloc(s, ret, size, gfpflags);
4a92379b 2814 return ret;
5b882be4 2815}
4a92379b 2816EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
5b882be4 2817#endif
5d1f57e4 2818#endif
5b882be4 2819
81819f0f 2820/*
94e4d712 2821 * Slow path handling. This may still be called frequently since objects
894b8788 2822 * have a longer lifetime than the cpu slabs in most processing loads.
81819f0f 2823 *
894b8788
CL
2824 * So we still attempt to reduce cache line usage. Just take the slab
2825 * lock and free the item. If there is no additional partial page
2826 * handling required then we can return immediately.
81819f0f 2827 */
894b8788 2828static void __slab_free(struct kmem_cache *s, struct page *page,
81084651
JDB
2829 void *head, void *tail, int cnt,
2830 unsigned long addr)
2831
81819f0f
CL
2832{
2833 void *prior;
2cfb7455 2834 int was_frozen;
2cfb7455
CL
2835 struct page new;
2836 unsigned long counters;
2837 struct kmem_cache_node *n = NULL;
61728d1e 2838 unsigned long uninitialized_var(flags);
81819f0f 2839
8a5ec0ba 2840 stat(s, FREE_SLOWPATH);
81819f0f 2841
19c7ff9e 2842 if (kmem_cache_debug(s) &&
282acb43 2843 !free_debug_processing(s, page, head, tail, cnt, addr))
80f08c19 2844 return;
6446faa2 2845
2cfb7455 2846 do {
837d678d
JK
2847 if (unlikely(n)) {
2848 spin_unlock_irqrestore(&n->list_lock, flags);
2849 n = NULL;
2850 }
2cfb7455
CL
2851 prior = page->freelist;
2852 counters = page->counters;
81084651 2853 set_freepointer(s, tail, prior);
2cfb7455
CL
2854 new.counters = counters;
2855 was_frozen = new.frozen;
81084651 2856 new.inuse -= cnt;
837d678d 2857 if ((!new.inuse || !prior) && !was_frozen) {
49e22585 2858
c65c1877 2859 if (kmem_cache_has_cpu_partial(s) && !prior) {
49e22585
CL
2860
2861 /*
d0e0ac97
CG
2862 * Slab was on no list before and will be
2863 * partially empty
2864 * We can defer the list move and instead
2865 * freeze it.
49e22585
CL
2866 */
2867 new.frozen = 1;
2868
c65c1877 2869 } else { /* Needs to be taken off a list */
49e22585 2870
b455def2 2871 n = get_node(s, page_to_nid(page));
49e22585
CL
2872 /*
2873 * Speculatively acquire the list_lock.
2874 * If the cmpxchg does not succeed then we may
2875 * drop the list_lock without any processing.
2876 *
2877 * Otherwise the list_lock will synchronize with
2878 * other processors updating the list of slabs.
2879 */
2880 spin_lock_irqsave(&n->list_lock, flags);
2881
2882 }
2cfb7455 2883 }
81819f0f 2884
2cfb7455
CL
2885 } while (!cmpxchg_double_slab(s, page,
2886 prior, counters,
81084651 2887 head, new.counters,
2cfb7455 2888 "__slab_free"));
81819f0f 2889
2cfb7455 2890 if (likely(!n)) {
49e22585
CL
2891
2892 /*
2893 * If we just froze the page then put it onto the
2894 * per cpu partial list.
2895 */
8028dcea 2896 if (new.frozen && !was_frozen) {
49e22585 2897 put_cpu_partial(s, page, 1);
8028dcea
AS
2898 stat(s, CPU_PARTIAL_FREE);
2899 }
49e22585 2900 /*
2cfb7455
CL
2901 * The list lock was not taken therefore no list
2902 * activity can be necessary.
2903 */
b455def2
L
2904 if (was_frozen)
2905 stat(s, FREE_FROZEN);
2906 return;
2907 }
81819f0f 2908
8a5b20ae 2909 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
837d678d
JK
2910 goto slab_empty;
2911
81819f0f 2912 /*
837d678d
JK
2913 * Objects left in the slab. If it was not on the partial list before
2914 * then add it.
81819f0f 2915 */
345c905d
JK
2916 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2917 if (kmem_cache_debug(s))
c65c1877 2918 remove_full(s, n, page);
837d678d
JK
2919 add_partial(n, page, DEACTIVATE_TO_TAIL);
2920 stat(s, FREE_ADD_PARTIAL);
8ff12cfc 2921 }
80f08c19 2922 spin_unlock_irqrestore(&n->list_lock, flags);
81819f0f
CL
2923 return;
2924
2925slab_empty:
a973e9dd 2926 if (prior) {
81819f0f 2927 /*
6fbabb20 2928 * Slab on the partial list.
81819f0f 2929 */
5cc6eee8 2930 remove_partial(n, page);
84e554e6 2931 stat(s, FREE_REMOVE_PARTIAL);
c65c1877 2932 } else {
6fbabb20 2933 /* Slab must be on the full list */
c65c1877
PZ
2934 remove_full(s, n, page);
2935 }
2cfb7455 2936
80f08c19 2937 spin_unlock_irqrestore(&n->list_lock, flags);
84e554e6 2938 stat(s, FREE_SLAB);
81819f0f 2939 discard_slab(s, page);
81819f0f
CL
2940}
2941
894b8788
CL
2942/*
2943 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2944 * can perform fastpath freeing without additional function calls.
2945 *
2946 * The fastpath is only possible if we are freeing to the current cpu slab
2947 * of this processor. This is typically the case if we have just allocated
2948 * the item before.
2949 *
2950 * If fastpath is not possible then fall back to __slab_free where we deal
2951 * with all sorts of special processing.
81084651
JDB
2952 *
2953 * Bulk free of a freelist with several objects (all pointing to the
2954 * same page) is possible by specifying head and tail ptr, plus objects
2955 * count (cnt). Bulk free is indicated by the tail pointer being set.
894b8788 2956 */
80a9201a
AP
2957static __always_inline void do_slab_free(struct kmem_cache *s,
2958 struct page *page, void *head, void *tail,
2959 int cnt, unsigned long addr)
894b8788 2960{
81084651 2961 void *tail_obj = tail ? : head;
dfb4f096 2962 struct kmem_cache_cpu *c;
8a5ec0ba 2963 unsigned long tid;
8a5ec0ba
CL
2964redo:
2965 /*
2966 * Determine the current cpu's per cpu slab.
2967 * The cpu may change afterward. However that does not matter since
2968 * data is retrieved via this pointer. If we are on the same cpu
2ae44005 2969 * during the cmpxchg then the free will succeed.
8a5ec0ba 2970 */
9aabf810
JK
2971 do {
2972 tid = this_cpu_read(s->cpu_slab->tid);
2973 c = raw_cpu_ptr(s->cpu_slab);
859b7a0e
MR
2974 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2975 unlikely(tid != READ_ONCE(c->tid)));
c016b0bd 2976
9aabf810
JK
2977 /* Same with comment on barrier() in slab_alloc_node() */
2978 barrier();
c016b0bd 2979
442b06bc 2980 if (likely(page == c->page)) {
81084651 2981 set_freepointer(s, tail_obj, c->freelist);
8a5ec0ba 2982
933393f5 2983 if (unlikely(!this_cpu_cmpxchg_double(
8a5ec0ba
CL
2984 s->cpu_slab->freelist, s->cpu_slab->tid,
2985 c->freelist, tid,
81084651 2986 head, next_tid(tid)))) {
8a5ec0ba
CL
2987
2988 note_cmpxchg_failure("slab_free", s, tid);
2989 goto redo;
2990 }
84e554e6 2991 stat(s, FREE_FASTPATH);
894b8788 2992 } else
81084651 2993 __slab_free(s, page, head, tail_obj, cnt, addr);
894b8788 2994
894b8788
CL
2995}
2996
80a9201a
AP
2997static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
2998 void *head, void *tail, int cnt,
2999 unsigned long addr)
3000{
80a9201a 3001 /*
c3895391
AK
3002 * With KASAN enabled slab_free_freelist_hook modifies the freelist
3003 * to remove objects, whose reuse must be delayed.
80a9201a 3004 */
c3895391
AK
3005 if (slab_free_freelist_hook(s, &head, &tail))
3006 do_slab_free(s, page, head, tail, cnt, addr);
80a9201a
AP
3007}
3008
2bd926b4 3009#ifdef CONFIG_KASAN_GENERIC
80a9201a
AP
3010void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
3011{
3012 do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
3013}
3014#endif
3015
81819f0f
CL
3016void kmem_cache_free(struct kmem_cache *s, void *x)
3017{
b9ce5ef4
GC
3018 s = cache_from_obj(s, x);
3019 if (!s)
79576102 3020 return;
81084651 3021 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
ca2b84cb 3022 trace_kmem_cache_free(_RET_IP_, x);
81819f0f
CL
3023}
3024EXPORT_SYMBOL(kmem_cache_free);
3025
d0ecd894 3026struct detached_freelist {
fbd02630 3027 struct page *page;
d0ecd894
JDB
3028 void *tail;
3029 void *freelist;
3030 int cnt;
376bf125 3031 struct kmem_cache *s;
d0ecd894 3032};
fbd02630 3033
d0ecd894
JDB
3034/*
3035 * This function progressively scans the array with free objects (with
3036 * a limited look ahead) and extracts objects belonging to the same
3037 * page. It builds a detached freelist directly within the given
3038 * page/objects. This can happen without any need for
3039 * synchronization, because the objects are owned by the running process.
3040 * The freelist is built up as a single linked list in the objects.
3041 * The idea is that this detached freelist can then be bulk
3042 * transferred to the real freelist(s), but only requiring a single
3043 * synchronization primitive. Look ahead in the array is limited due
3044 * to performance reasons.
3045 */
376bf125
JDB
3046static inline
3047int build_detached_freelist(struct kmem_cache *s, size_t size,
3048 void **p, struct detached_freelist *df)
d0ecd894
JDB
3049{
3050 size_t first_skipped_index = 0;
3051 int lookahead = 3;
3052 void *object;
ca257195 3053 struct page *page;
fbd02630 3054
d0ecd894
JDB
3055 /* Always re-init detached_freelist */
3056 df->page = NULL;
fbd02630 3057
d0ecd894
JDB
3058 do {
3059 object = p[--size];
ca257195 3060 /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
d0ecd894 3061 } while (!object && size);
3eed034d 3062
d0ecd894
JDB
3063 if (!object)
3064 return 0;
fbd02630 3065
ca257195
JDB
3066 page = virt_to_head_page(object);
3067 if (!s) {
3068 /* Handle kmalloc'ed objects */
3069 if (unlikely(!PageSlab(page))) {
3070 BUG_ON(!PageCompound(page));
3071 kfree_hook(object);
4949148a 3072 __free_pages(page, compound_order(page));
ca257195
JDB
3073 p[size] = NULL; /* mark object processed */
3074 return size;
3075 }
3076 /* Derive kmem_cache from object */
3077 df->s = page->slab_cache;
3078 } else {
3079 df->s = cache_from_obj(s, object); /* Support for memcg */
3080 }
376bf125 3081
d0ecd894 3082 /* Start new detached freelist */
ca257195 3083 df->page = page;
376bf125 3084 set_freepointer(df->s, object, NULL);
d0ecd894
JDB
3085 df->tail = object;
3086 df->freelist = object;
3087 p[size] = NULL; /* mark object processed */
3088 df->cnt = 1;
3089
3090 while (size) {
3091 object = p[--size];
3092 if (!object)
3093 continue; /* Skip processed objects */
3094
3095 /* df->page is always set at this point */
3096 if (df->page == virt_to_head_page(object)) {
3097 /* Opportunity build freelist */
376bf125 3098 set_freepointer(df->s, object, df->freelist);
d0ecd894
JDB
3099 df->freelist = object;
3100 df->cnt++;
3101 p[size] = NULL; /* mark object processed */
3102
3103 continue;
fbd02630 3104 }
d0ecd894
JDB
3105
3106 /* Limit look ahead search */
3107 if (!--lookahead)
3108 break;
3109
3110 if (!first_skipped_index)
3111 first_skipped_index = size + 1;
fbd02630 3112 }
d0ecd894
JDB
3113
3114 return first_skipped_index;
3115}
3116
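/*
 * Worked example (illustration only, not part of the original source): for a
 * bulk free array p = { a0, b0, a1, a2, c0 } where a*, b* and c* sit on pages
 * A, B and C, the first call anchors df->page = C (last element) and links
 * only c0, since the lookahead stops after a few objects from other pages.
 * The next call links a2, a1 and a0 into one freelist for page A, skipping
 * b0, and a final call handles b0. Each page therefore costs one slab_free()
 * instead of one per object.
 */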
d0ecd894 3117/* Note that interrupts must be enabled when calling this function. */
376bf125 3118void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
d0ecd894
JDB
3119{
3120 if (WARN_ON(!size))
3121 return;
3122
3123 do {
3124 struct detached_freelist df;
3125
3126 size = build_detached_freelist(s, size, p, &df);
84582c8a 3127 if (!df.page)
d0ecd894
JDB
3128 continue;
3129
376bf125 3130 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
d0ecd894 3131 } while (likely(size));
484748f0
CL
3132}
3133EXPORT_SYMBOL(kmem_cache_free_bulk);
3134
994eb764 3135/* Note that interrupts must be enabled when calling this function. */
865762a8
JDB
3136int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3137 void **p)
484748f0 3138{
994eb764
JDB
3139 struct kmem_cache_cpu *c;
3140 int i;
3141
03ec0ed5
JDB
3142 /* memcg and kmem_cache debug support */
3143 s = slab_pre_alloc_hook(s, flags);
3144 if (unlikely(!s))
3145 return false;
994eb764
JDB
3146 /*
3147 * Drain objects in the per cpu slab, while disabling local
3148 * IRQs, which protects against PREEMPT and interrupts
3149 * handlers invoking normal fastpath.
3150 */
3151 local_irq_disable();
3152 c = this_cpu_ptr(s->cpu_slab);
3153
3154 for (i = 0; i < size; i++) {
3155 void *object = c->freelist;
3156
ebe909e0 3157 if (unlikely(!object)) {
ebe909e0
JDB
3158 /*
3159 * Invoking the slow path likely has the side-effect
3160 * of re-populating the per CPU c->freelist
3161 */
87098373 3162 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
ebe909e0 3163 _RET_IP_, c);
87098373
CL
3164 if (unlikely(!p[i]))
3165 goto error;
3166
ebe909e0
JDB
3167 c = this_cpu_ptr(s->cpu_slab);
3168 continue; /* goto for-loop */
3169 }
994eb764
JDB
3170 c->freelist = get_freepointer(s, object);
3171 p[i] = object;
3172 }
3173 c->tid = next_tid(c->tid);
3174 local_irq_enable();
3175
3176 /* Clear memory outside IRQ disabled fastpath loop */
3177 if (unlikely(flags & __GFP_ZERO)) {
3178 int j;
3179
3180 for (j = 0; j < i; j++)
3181 memset(p[j], 0, s->object_size);
3182 }
3183
03ec0ed5
JDB
3184 /* memcg and kmem_cache debug support */
3185 slab_post_alloc_hook(s, flags, size, p);
865762a8 3186 return i;
87098373 3187error:
87098373 3188 local_irq_enable();
03ec0ed5
JDB
3189 slab_post_alloc_hook(s, flags, i, p);
3190 __kmem_cache_free_bulk(s, i, p);
865762a8 3191 return 0;
484748f0
CL
3192}
3193EXPORT_SYMBOL(kmem_cache_alloc_bulk);
3194
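/*
 * Usage sketch (illustrative only, not part of the original source;
 * foo_cachep and the array size are hypothetical): the bulk API amortizes
 * the irq-disable and per cpu freelist handling over many objects. Both
 * calls must be made with interrupts enabled, and kmem_cache_alloc_bulk()
 * returns the number of objects allocated (0 on failure).
 *
 *	void *objs[16];
 *	int got;
 *
 *	got = kmem_cache_alloc_bulk(foo_cachep, GFP_KERNEL,
 *				    ARRAY_SIZE(objs), objs);
 *	if (!got)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cachep, got, objs);
 */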
3195
81819f0f 3196/*
672bba3a
CL
3197 * Object placement in a slab is made very easy because we always start at
3198 * offset 0. If we tune the size of the object to the alignment then we can
3199 * get the required alignment by putting one properly sized object after
3200 * another.
81819f0f
CL
3201 *
3202 * Notice that the allocation order determines the sizes of the per cpu
3203 * caches. Each processor has always one slab available for allocations.
3204 * Increasing the allocation order reduces the number of times that slabs
672bba3a 3205 * must be moved on and off the partial lists and is therefore a factor in
81819f0f 3206 * locking overhead.
81819f0f
CL
3207 */
3208
3209/*
3210 * Minimum / Maximum order of slab pages. This influences locking overhead
3211 * and slab fragmentation. A higher order reduces the number of partial slabs
3212 * and increases the number of allocations possible without having to
3213 * take the list_lock.
3214 */
19af27af
AD
3215static unsigned int slub_min_order;
3216static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3217static unsigned int slub_min_objects;
81819f0f 3218
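/*
 * Tuning sketch (illustration only, not part of the original source): these
 * knobs are normally left at their defaults but can be set on the kernel
 * command line, e.g.
 *
 *	slub_min_order=1 slub_max_order=3 slub_min_objects=16
 *
 * which forces at least order-1 slab pages, caps the order search at 3 and
 * makes calculate_order() below aim for at least 16 objects per slab before
 * it starts accepting more wasted space.
 */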
81819f0f
CL
3219/*
3220 * Calculate the order of allocation given a slab object size.
3221 *
672bba3a
CL
3222 * The order of allocation has significant impact on performance and other
3223 * system components. Generally order 0 allocations should be preferred since
3224 * order 0 does not cause fragmentation in the page allocator. Larger objects
3225 * can be problematic to put into order 0 slabs because there may be too much
c124f5b5 3226 * unused space left. We go to a higher order if more than 1/16th of the slab
672bba3a
CL
3227 * would be wasted.
3228 *
3229 * In order to reach satisfactory performance we must ensure that a minimum
3230 * number of objects is in one slab. Otherwise we may generate too much
3231 * activity on the partial lists which requires taking the list_lock. This is
3232 * less a concern for large slabs though which are rarely used.
81819f0f 3233 *
672bba3a
CL
3234 * slub_max_order specifies the order where we begin to stop considering the
3235 * number of objects in a slab as critical. If we reach slub_max_order then
3236 * we try to keep the page order as low as possible. So we accept more waste
3237 * of space in favor of a small page order.
81819f0f 3238 *
672bba3a
CL
3239 * Higher order allocations also allow the placement of more objects in a
3240 * slab and thereby reduce object handling overhead. If the user has
3241 * requested a higher minimum order then we start with that one instead of
3242 * the smallest order which will fit the object.
81819f0f 3243 */
19af27af
AD
3244static inline unsigned int slab_order(unsigned int size,
3245 unsigned int min_objects, unsigned int max_order,
9736d2a9 3246 unsigned int fract_leftover)
81819f0f 3247{
19af27af
AD
3248 unsigned int min_order = slub_min_order;
3249 unsigned int order;
81819f0f 3250
9736d2a9 3251 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
210b5c06 3252 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
39b26464 3253
9736d2a9 3254 for (order = max(min_order, (unsigned int)get_order(min_objects * size));
5e6d444e 3255 order <= max_order; order++) {
81819f0f 3256
19af27af
AD
3257 unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
3258 unsigned int rem;
81819f0f 3259
9736d2a9 3260 rem = slab_size % size;
81819f0f 3261
5e6d444e 3262 if (rem <= slab_size / fract_leftover)
81819f0f 3263 break;
81819f0f 3264 }
672bba3a 3265
81819f0f
CL
3266 return order;
3267}
3268
9736d2a9 3269static inline int calculate_order(unsigned int size)
5e6d444e 3270{
19af27af
AD
3271 unsigned int order;
3272 unsigned int min_objects;
3273 unsigned int max_objects;
5e6d444e
CL
3274
3275 /*
3276 * Attempt to find best configuration for a slab. This
3277 * works by first attempting to generate a layout with
3278 * the best configuration and backing off gradually.
3279 *
422ff4d7 3280 * First we increase the acceptable waste in a slab. Then
5e6d444e
CL
3281 * we reduce the minimum objects required in a slab.
3282 */
3283 min_objects = slub_min_objects;
9b2cd506
CL
3284 if (!min_objects)
3285 min_objects = 4 * (fls(nr_cpu_ids) + 1);
9736d2a9 3286 max_objects = order_objects(slub_max_order, size);
e8120ff1
ZY
3287 min_objects = min(min_objects, max_objects);
3288
5e6d444e 3289 while (min_objects > 1) {
19af27af
AD
3290 unsigned int fraction;
3291
c124f5b5 3292 fraction = 16;
5e6d444e
CL
3293 while (fraction >= 4) {
3294 order = slab_order(size, min_objects,
9736d2a9 3295 slub_max_order, fraction);
5e6d444e
CL
3296 if (order <= slub_max_order)
3297 return order;
3298 fraction /= 2;
3299 }
5086c389 3300 min_objects--;
5e6d444e
CL
3301 }
3302
3303 /*
3304 * We were unable to place multiple objects in a slab. Now
3305 * let's see if we can place a single object there.
3306 */
9736d2a9 3307 order = slab_order(size, 1, slub_max_order, 1);
5e6d444e
CL
3308 if (order <= slub_max_order)
3309 return order;
3310
3311 /*
3312 * Doh this slab cannot be placed using slub_max_order.
3313 */
9736d2a9 3314 order = slab_order(size, 1, MAX_ORDER, 1);
818cf590 3315 if (order < MAX_ORDER)
5e6d444e
CL
3316 return order;
3317 return -ENOSYS;
3318}
3319
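/*
 * Worked example (illustration only, not part of the original source):
 * assume 4KiB pages, the default slub_max_order of 3, no boot overrides and
 * 8 possible cpus, so min_objects = 4 * (fls(8) + 1) = 20. For a 600 byte
 * object, slab_order(600, 20, 3, 16) starts at get_order(20 * 600) = 2; a
 * 16KiB slab leaves 16384 % 600 = 184 bytes unused, which is below
 * 16384 / 16 = 1024, so order 2 is accepted and each slab holds 27 objects.
 */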
5595cffc 3320static void
4053497d 3321init_kmem_cache_node(struct kmem_cache_node *n)
81819f0f
CL
3322{
3323 n->nr_partial = 0;
81819f0f
CL
3324 spin_lock_init(&n->list_lock);
3325 INIT_LIST_HEAD(&n->partial);
8ab1372f 3326#ifdef CONFIG_SLUB_DEBUG
0f389ec6 3327 atomic_long_set(&n->nr_slabs, 0);
02b71b70 3328 atomic_long_set(&n->total_objects, 0);
643b1138 3329 INIT_LIST_HEAD(&n->full);
8ab1372f 3330#endif
81819f0f
CL
3331}
3332
55136592 3333static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
4c93c355 3334{
6c182dc0 3335 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
95a05b42 3336 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
4c93c355 3337
8a5ec0ba 3338 /*
d4d84fef
CM
3339 * Must align to double word boundary for the double cmpxchg
3340 * instructions to work; see __pcpu_double_call_return_bool().
8a5ec0ba 3341 */
d4d84fef
CM
3342 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3343 2 * sizeof(void *));
8a5ec0ba
CL
3344
3345 if (!s->cpu_slab)
3346 return 0;
3347
3348 init_kmem_cache_cpus(s);
4c93c355 3349
8a5ec0ba 3350 return 1;
4c93c355 3351}
4c93c355 3352
51df1142
CL
3353static struct kmem_cache *kmem_cache_node;
3354
81819f0f
CL
3355/*
3356 * No kmalloc_node yet so do it by hand. We know that this is the first
3357 * slab on the node for this slabcache. There are no concurrent accesses
3358 * possible.
3359 *
721ae22a
ZYW
3360 * Note that this function only works on the kmem_cache_node
3361 * when allocating for the kmem_cache_node. This is used for bootstrapping
4c93c355 3362 * memory on a fresh node that has no slab structures yet.
81819f0f 3363 */
55136592 3364static void early_kmem_cache_node_alloc(int node)
81819f0f
CL
3365{
3366 struct page *page;
3367 struct kmem_cache_node *n;
3368
51df1142 3369 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
81819f0f 3370
51df1142 3371 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
81819f0f
CL
3372
3373 BUG_ON(!page);
a2f92ee7 3374 if (page_to_nid(page) != node) {
f9f58285
FF
3375 pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3376 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
a2f92ee7
CL
3377 }
3378
81819f0f
CL
3379 n = page->freelist;
3380 BUG_ON(!n);
8ab1372f 3381#ifdef CONFIG_SLUB_DEBUG
f7cb1933 3382 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
51df1142 3383 init_tracking(kmem_cache_node, n);
8ab1372f 3384#endif
12b22386 3385 n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
505f5dcb 3386 GFP_KERNEL);
12b22386
AK
3387 page->freelist = get_freepointer(kmem_cache_node, n);
3388 page->inuse = 1;
3389 page->frozen = 0;
3390 kmem_cache_node->node[node] = n;
4053497d 3391 init_kmem_cache_node(n);
51df1142 3392 inc_slabs_node(kmem_cache_node, node, page->objects);
6446faa2 3393
67b6c900 3394 /*
1e4dd946
SR
3395 * No locks need to be taken here as it has just been
3396 * initialized and there is no concurrent access.
67b6c900 3397 */
1e4dd946 3398 __add_partial(n, page, DEACTIVATE_TO_HEAD);
81819f0f
CL
3399}
3400
3401static void free_kmem_cache_nodes(struct kmem_cache *s)
3402{
3403 int node;
fa45dc25 3404 struct kmem_cache_node *n;
81819f0f 3405
fa45dc25 3406 for_each_kmem_cache_node(s, node, n) {
81819f0f 3407 s->node[node] = NULL;
ea37df54 3408 kmem_cache_free(kmem_cache_node, n);
81819f0f
CL
3409 }
3410}
3411
52b4b950
DS
3412void __kmem_cache_release(struct kmem_cache *s)
3413{
210e7a43 3414 cache_random_seq_destroy(s);
52b4b950
DS
3415 free_percpu(s->cpu_slab);
3416 free_kmem_cache_nodes(s);
3417}
3418
55136592 3419static int init_kmem_cache_nodes(struct kmem_cache *s)
81819f0f
CL
3420{
3421 int node;
81819f0f 3422
f64dc58c 3423 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
3424 struct kmem_cache_node *n;
3425
73367bd8 3426 if (slab_state == DOWN) {
55136592 3427 early_kmem_cache_node_alloc(node);
73367bd8
AD
3428 continue;
3429 }
51df1142 3430 n = kmem_cache_alloc_node(kmem_cache_node,
55136592 3431 GFP_KERNEL, node);
81819f0f 3432
73367bd8
AD
3433 if (!n) {
3434 free_kmem_cache_nodes(s);
3435 return 0;
81819f0f 3436 }
73367bd8 3437
4053497d 3438 init_kmem_cache_node(n);
ea37df54 3439 s->node[node] = n;
81819f0f
CL
3440 }
3441 return 1;
3442}
81819f0f 3443
c0bdb232 3444static void set_min_partial(struct kmem_cache *s, unsigned long min)
3b89d7d8
DR
3445{
3446 if (min < MIN_PARTIAL)
3447 min = MIN_PARTIAL;
3448 else if (min > MAX_PARTIAL)
3449 min = MAX_PARTIAL;
3450 s->min_partial = min;
3451}
3452
e6d0e1dc
WY
3453static void set_cpu_partial(struct kmem_cache *s)
3454{
3455#ifdef CONFIG_SLUB_CPU_PARTIAL
3456 /*
3457	 * cpu_partial determines the maximum number of objects kept in the
3458 * per cpu partial lists of a processor.
3459 *
3460 * Per cpu partial lists mainly contain slabs that just have one
3461 * object freed. If they are used for allocation then they can be
3462 * filled up again with minimal effort. The slab will never hit the
3463 * per node partial lists and therefore no locking will be required.
3464 *
3465 * This setting also determines
3466 *
3467 * A) The number of objects from per cpu partial slabs dumped to the
3468 * per node list when we reach the limit.
3469 * B) The number of objects in cpu partial slabs to extract from the
3470 * per node list when we run out of per cpu objects. We only fetch
3471 * 50% to keep some capacity around for frees.
3472 */
3473 if (!kmem_cache_has_cpu_partial(s))
3474 s->cpu_partial = 0;
3475 else if (s->size >= PAGE_SIZE)
3476 s->cpu_partial = 2;
3477 else if (s->size >= 1024)
3478 s->cpu_partial = 6;
3479 else if (s->size >= 256)
3480 s->cpu_partial = 13;
3481 else
3482 s->cpu_partial = 30;
3483#endif
3484}
3485
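/*
 * Editorial summary of the thresholds above: with CONFIG_SLUB_CPU_PARTIAL
 * enabled, a cache whose s->size is at least PAGE_SIZE keeps at most 2
 * objects in per cpu partial slabs, at least 1024 bytes keeps 6, at least
 * 256 bytes keeps 13, and smaller objects keep 30; caches without cpu
 * partial support get 0.
 */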
81819f0f
CL
3486/*
3487 * calculate_sizes() determines the order and the distribution of data within
3488 * a slab object.
3489 */
06b285dc 3490static int calculate_sizes(struct kmem_cache *s, int forced_order)
81819f0f 3491{
d50112ed 3492 slab_flags_t flags = s->flags;
be4a7988 3493 unsigned int size = s->object_size;
19af27af 3494 unsigned int order;
81819f0f 3495
d8b42bf5
CL
3496 /*
3497 * Round up object size to the next word boundary. We can only
3498 * place the free pointer at word boundaries and this determines
3499 * the possible location of the free pointer.
3500 */
3501 size = ALIGN(size, sizeof(void *));
3502
3503#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
3504 /*
3505 * Determine if we can poison the object itself. If the user of
3506 * the slab may touch the object after free or before allocation
3507 * then we should never poison the object itself.
3508 */
5f0d5a3a 3509 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
c59def9f 3510 !s->ctor)
81819f0f
CL
3511 s->flags |= __OBJECT_POISON;
3512 else
3513 s->flags &= ~__OBJECT_POISON;
3514
81819f0f
CL
3515
3516 /*
672bba3a 3517	 * If we are red zoning, check whether there is some space between the
81819f0f 3518	 * end of the object and the free pointer. If not, add an
672bba3a 3519	 * additional word so there is room to store red zone information.
81819f0f 3520 */
3b0efdfa 3521 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
81819f0f 3522 size += sizeof(void *);
41ecc55b 3523#endif
81819f0f
CL
3524
3525 /*
672bba3a
CL
3526 * With that we have determined the number of bytes in actual use
3527 * by the object. This is the potential offset to the free pointer.
81819f0f
CL
3528 */
3529 s->inuse = size;
3530
5f0d5a3a 3531 if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
c59def9f 3532 s->ctor)) {
81819f0f
CL
3533 /*
3534 * Relocate free pointer after the object if it is not
3535 * permitted to overwrite the first word of the object on
3536 * kmem_cache_free.
3537 *
3538 * This is the case if we do RCU, have a constructor or
3539 * destructor or are poisoning the objects.
3540 */
3541 s->offset = size;
3542 size += sizeof(void *);
3543 }
3544
c12b3c62 3545#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
3546 if (flags & SLAB_STORE_USER)
3547 /*
3548 * Need to store information about allocs and frees after
3549 * the object.
3550 */
3551 size += 2 * sizeof(struct track);
80a9201a 3552#endif
81819f0f 3553
80a9201a
AP
3554 kasan_cache_create(s, &size, &s->flags);
3555#ifdef CONFIG_SLUB_DEBUG
d86bd1be 3556 if (flags & SLAB_RED_ZONE) {
81819f0f
CL
3557 /*
3558 * Add some empty padding so that we can catch
3559 * overwrites from earlier objects rather than let
3560 * tracking information or the free pointer be
0211a9c8 3561 * corrupted if a user writes before the start
81819f0f
CL
3562 * of the object.
3563 */
3564 size += sizeof(void *);
d86bd1be
JK
3565
3566 s->red_left_pad = sizeof(void *);
3567 s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3568 size += s->red_left_pad;
3569 }
41ecc55b 3570#endif
672bba3a 3571
81819f0f
CL
3572 /*
3573 * SLUB stores one object immediately after another beginning from
3574 * offset 0. In order to align the objects we have to simply size
3575 * each object to conform to the alignment.
3576 */
45906855 3577 size = ALIGN(size, s->align);
81819f0f 3578 s->size = size;
06b285dc
CL
3579 if (forced_order >= 0)
3580 order = forced_order;
3581 else
9736d2a9 3582 order = calculate_order(size);
81819f0f 3583
19af27af 3584 if ((int)order < 0)
81819f0f
CL
3585 return 0;
3586
b7a49f0d 3587 s->allocflags = 0;
834f3d11 3588 if (order)
b7a49f0d
CL
3589 s->allocflags |= __GFP_COMP;
3590
3591 if (s->flags & SLAB_CACHE_DMA)
2c59dd65 3592 s->allocflags |= GFP_DMA;
b7a49f0d
CL
3593
3594 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3595 s->allocflags |= __GFP_RECLAIMABLE;
3596
81819f0f
CL
3597 /*
3598 * Determine the number of objects per slab
3599 */
9736d2a9
MW
3600 s->oo = oo_make(order, size);
3601 s->min = oo_make(get_order(size), size);
205ab99d
CL
3602 if (oo_objects(s->oo) > oo_objects(s->max))
3603 s->max = s->oo;
81819f0f 3604
834f3d11 3605 return !!oo_objects(s->oo);
81819f0f
CL
3606}
3607
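/*
 * Worked layout example (editorial sketch, assuming a 64-bit build with
 * 8-byte alignment, no constructor and no SLAB_TYPESAFE_BY_RCU): for a
 * 32-byte object created with SLAB_RED_ZONE | SLAB_STORE_USER the code
 * above proceeds roughly as follows:
 *
 *	size = ALIGN(32, 8)		-> 32	word-align the object
 *	size += sizeof(void *)		-> 40	right red zone (size == object_size)
 *	s->inuse = 40				bytes in actual use per object
 *	s->offset stays 0			no RCU/poison/ctor, so the free
 *						pointer may overlay the object
 *	size += 2 * sizeof(struct track)	alloc/free tracking records
 *	kasan_cache_create() may grow size	only with KASAN enabled
 *	size += sizeof(void *)			padding before the next object
 *	s->red_left_pad = 8; size += 8		left red zone
 *	size = ALIGN(size, s->align)		final per-object stride, s->size
 */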
d50112ed 3608static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
81819f0f 3609{
8a13a4cc 3610 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
2482ddec
KC
3611#ifdef CONFIG_SLAB_FREELIST_HARDENED
3612 s->random = get_random_long();
3613#endif
81819f0f 3614
06b285dc 3615 if (!calculate_sizes(s, -1))
81819f0f 3616 goto error;
3de47213
DR
3617 if (disable_higher_order_debug) {
3618 /*
3619 * Disable debugging flags that store metadata if the min slab
3620 * order increased.
3621 */
3b0efdfa 3622 if (get_order(s->size) > get_order(s->object_size)) {
3de47213
DR
3623 s->flags &= ~DEBUG_METADATA_FLAGS;
3624 s->offset = 0;
3625 if (!calculate_sizes(s, -1))
3626 goto error;
3627 }
3628 }
81819f0f 3629
2565409f
HC
3630#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3631 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
149daaf3 3632 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
b789ef51
CL
3633 /* Enable fast mode */
3634 s->flags |= __CMPXCHG_DOUBLE;
3635#endif
3636
3b89d7d8
DR
3637 /*
3638 * The larger the object size is, the more pages we want on the partial
3639 * list to avoid pounding the page allocator excessively.
3640 */
49e22585
CL
3641 set_min_partial(s, ilog2(s->size) / 2);
3642
e6d0e1dc 3643 set_cpu_partial(s);
49e22585 3644
81819f0f 3645#ifdef CONFIG_NUMA
e2cb96b7 3646 s->remote_node_defrag_ratio = 1000;
81819f0f 3647#endif
210e7a43
TG
3648
3649 /* Initialize the pre-computed randomized freelist if slab is up */
3650 if (slab_state >= UP) {
3651 if (init_cache_random_seq(s))
3652 goto error;
3653 }
3654
55136592 3655 if (!init_kmem_cache_nodes(s))
dfb4f096 3656 goto error;
81819f0f 3657
55136592 3658 if (alloc_kmem_cache_cpus(s))
278b1bb1 3659 return 0;
ff12059e 3660
4c93c355 3661 free_kmem_cache_nodes(s);
81819f0f
CL
3662error:
3663 if (flags & SLAB_PANIC)
44065b2e
AD
3664 panic("Cannot create slab %s size=%u realsize=%u order=%u offset=%u flags=%lx\n",
3665 s->name, s->size, s->size,
4fd0b46e 3666 oo_order(s->oo), s->offset, (unsigned long)flags);
278b1bb1 3667 return -EINVAL;
81819f0f 3668}
81819f0f 3669
33b12c38
CL
3670static void list_slab_objects(struct kmem_cache *s, struct page *page,
3671 const char *text)
3672{
3673#ifdef CONFIG_SLUB_DEBUG
3674 void *addr = page_address(page);
3675 void *p;
0684e652 3676 unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
bbd7d57b
ED
3677 if (!map)
3678 return;
945cf2b6 3679 slab_err(s, page, text, s->name);
33b12c38 3680 slab_lock(page);
33b12c38 3681
5f80b13a 3682 get_map(s, page, map);
33b12c38
CL
3683 for_each_object(p, s, addr, page->objects) {
3684
3685 if (!test_bit(slab_index(p, s, addr), map)) {
f9f58285 3686 pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
33b12c38
CL
3687 print_tracking(s, p);
3688 }
3689 }
3690 slab_unlock(page);
0684e652 3691 bitmap_free(map);
33b12c38
CL
3692#endif
3693}
3694
81819f0f 3695/*
599870b1 3696 * Attempt to free all partial slabs on a node.
52b4b950
DS
3697 * This is called from __kmem_cache_shutdown(). We must take list_lock
3698	 * because sysfs files might still access the partial list after shutdown has begun.
81819f0f 3699 */
599870b1 3700static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
81819f0f 3701{
60398923 3702 LIST_HEAD(discard);
81819f0f
CL
3703 struct page *page, *h;
3704
52b4b950
DS
3705 BUG_ON(irqs_disabled());
3706 spin_lock_irq(&n->list_lock);
33b12c38 3707 list_for_each_entry_safe(page, h, &n->partial, lru) {
81819f0f 3708 if (!page->inuse) {
52b4b950 3709 remove_partial(n, page);
60398923 3710 list_add(&page->lru, &discard);
33b12c38
CL
3711 } else {
3712 list_slab_objects(s, page,
52b4b950 3713 "Objects remaining in %s on __kmem_cache_shutdown()");
599870b1 3714 }
33b12c38 3715 }
52b4b950 3716 spin_unlock_irq(&n->list_lock);
60398923
CW
3717
3718 list_for_each_entry_safe(page, h, &discard, lru)
3719 discard_slab(s, page);
81819f0f
CL
3720}
3721
f9e13c0a
SB
3722bool __kmem_cache_empty(struct kmem_cache *s)
3723{
3724 int node;
3725 struct kmem_cache_node *n;
3726
3727 for_each_kmem_cache_node(s, node, n)
3728 if (n->nr_partial || slabs_node(s, node))
3729 return false;
3730 return true;
3731}
3732
81819f0f 3733/*
672bba3a 3734 * Release all resources used by a slab cache.
81819f0f 3735 */
52b4b950 3736int __kmem_cache_shutdown(struct kmem_cache *s)
81819f0f
CL
3737{
3738 int node;
fa45dc25 3739 struct kmem_cache_node *n;
81819f0f
CL
3740
3741 flush_all(s);
81819f0f 3742 /* Attempt to free all objects */
fa45dc25 3743 for_each_kmem_cache_node(s, node, n) {
599870b1
CL
3744 free_partial(s, n);
3745 if (n->nr_partial || slabs_node(s, node))
81819f0f
CL
3746 return 1;
3747 }
bf5eb3de 3748 sysfs_slab_remove(s);
81819f0f
CL
3749 return 0;
3750}
3751
81819f0f
CL
3752/********************************************************************
3753 * Kmalloc subsystem
3754 *******************************************************************/
3755
81819f0f
CL
3756static int __init setup_slub_min_order(char *str)
3757{
19af27af 3758 get_option(&str, (int *)&slub_min_order);
81819f0f
CL
3759
3760 return 1;
3761}
3762
3763__setup("slub_min_order=", setup_slub_min_order);
3764
3765static int __init setup_slub_max_order(char *str)
3766{
19af27af
AD
3767 get_option(&str, (int *)&slub_max_order);
3768 slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
81819f0f
CL
3769
3770 return 1;
3771}
3772
3773__setup("slub_max_order=", setup_slub_max_order);
3774
3775static int __init setup_slub_min_objects(char *str)
3776{
19af27af 3777 get_option(&str, (int *)&slub_min_objects);
81819f0f
CL
3778
3779 return 1;
3780}
3781
3782__setup("slub_min_objects=", setup_slub_min_objects);
3783
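/*
 * Editorial usage note: the three handlers above back the slub_min_order=,
 * slub_max_order= and slub_min_objects= kernel command line options. For
 * example, booting with
 *
 *	slub_max_order=1 slub_min_objects=16
 *
 * caps slab pages at order 1 while asking calculate_order() to aim for at
 * least 16 objects per slab (the values here are purely illustrative).
 */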
81819f0f
CL
3784void *__kmalloc(size_t size, gfp_t flags)
3785{
aadb4bc4 3786 struct kmem_cache *s;
5b882be4 3787 void *ret;
81819f0f 3788
95a05b42 3789 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
eada35ef 3790 return kmalloc_large(size, flags);
aadb4bc4 3791
2c59dd65 3792 s = kmalloc_slab(size, flags);
aadb4bc4
CL
3793
3794 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
3795 return s;
3796
2b847c3c 3797 ret = slab_alloc(s, flags, _RET_IP_);
5b882be4 3798
ca2b84cb 3799 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
5b882be4 3800
0116523c 3801 ret = kasan_kmalloc(s, ret, size, flags);
0316bec2 3802
5b882be4 3803 return ret;
81819f0f
CL
3804}
3805EXPORT_SYMBOL(__kmalloc);
3806
5d1f57e4 3807#ifdef CONFIG_NUMA
f619cfe1
CL
3808static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3809{
b1eeab67 3810 struct page *page;
e4f7c0b4 3811 void *ptr = NULL;
f619cfe1 3812
75f296d9 3813 flags |= __GFP_COMP;
4949148a 3814 page = alloc_pages_node(node, flags, get_order(size));
f619cfe1 3815 if (page)
e4f7c0b4
CM
3816 ptr = page_address(page);
3817
0116523c 3818 return kmalloc_large_node_hook(ptr, size, flags);
f619cfe1
CL
3819}
3820
81819f0f
CL
3821void *__kmalloc_node(size_t size, gfp_t flags, int node)
3822{
aadb4bc4 3823 struct kmem_cache *s;
5b882be4 3824 void *ret;
81819f0f 3825
95a05b42 3826 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
5b882be4
EGM
3827 ret = kmalloc_large_node(size, flags, node);
3828
ca2b84cb
EGM
3829 trace_kmalloc_node(_RET_IP_, ret,
3830 size, PAGE_SIZE << get_order(size),
3831 flags, node);
5b882be4
EGM
3832
3833 return ret;
3834 }
aadb4bc4 3835
2c59dd65 3836 s = kmalloc_slab(size, flags);
aadb4bc4
CL
3837
3838 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
3839 return s;
3840
2b847c3c 3841 ret = slab_alloc_node(s, flags, node, _RET_IP_);
5b882be4 3842
ca2b84cb 3843 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
5b882be4 3844
0116523c 3845 ret = kasan_kmalloc(s, ret, size, flags);
0316bec2 3846
5b882be4 3847 return ret;
81819f0f
CL
3848}
3849EXPORT_SYMBOL(__kmalloc_node);
3850#endif
3851
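/*
 * Minimal usage sketch (editorial, not part of mm/slub.c): requests up to
 * KMALLOC_MAX_CACHE_SIZE are served by the kmalloc caches through the fast
 * paths above, larger ones go to the page allocator via kmalloc_large() or
 * kmalloc_large_node(). A hypothetical caller:
 *
 *	#include <linux/slab.h>
 *
 *	static int example_alloc(void)
 *	{
 *		char *buf = kzalloc(128, GFP_KERNEL);
 *
 *		if (!buf)
 *			return -ENOMEM;
 *		... use buf ...
 *		kfree(buf);
 *		return 0;
 *	}
 */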
ed18adc1
KC
3852#ifdef CONFIG_HARDENED_USERCOPY
3853/*
afcc90f8
KC
3854 * Rejects incorrectly sized objects and objects that are to be copied
3855 * to/from userspace but do not fall entirely within the containing slab
3856 * cache's usercopy region.
ed18adc1
KC
3857 *
3858	 * Returns normally if the check passes; otherwise the copy is reported
3859	 * via usercopy_warn() or rejected via usercopy_abort().
3860 */
f4e6e289
KC
3861void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
3862 bool to_user)
ed18adc1
KC
3863{
3864 struct kmem_cache *s;
44065b2e 3865 unsigned int offset;
ed18adc1
KC
3866 size_t object_size;
3867
96fedce2
AK
3868 ptr = kasan_reset_tag(ptr);
3869
ed18adc1
KC
3870 /* Find object and usable object size. */
3871 s = page->slab_cache;
ed18adc1
KC
3872
3873 /* Reject impossible pointers. */
3874 if (ptr < page_address(page))
f4e6e289
KC
3875 usercopy_abort("SLUB object not in SLUB page?!", NULL,
3876 to_user, 0, n);
ed18adc1
KC
3877
3878 /* Find offset within object. */
3879 offset = (ptr - page_address(page)) % s->size;
3880
3881 /* Adjust for redzone and reject if within the redzone. */
3882 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
3883 if (offset < s->red_left_pad)
f4e6e289
KC
3884 usercopy_abort("SLUB object in left red zone",
3885 s->name, to_user, offset, n);
ed18adc1
KC
3886 offset -= s->red_left_pad;
3887 }
3888
afcc90f8
KC
3889 /* Allow address range falling entirely within usercopy region. */
3890 if (offset >= s->useroffset &&
3891 offset - s->useroffset <= s->usersize &&
3892 n <= s->useroffset - offset + s->usersize)
f4e6e289 3893 return;
ed18adc1 3894
afcc90f8
KC
3895 /*
3896 * If the copy is still within the allocated object, produce
3897 * a warning instead of rejecting the copy. This is intended
3898 * to be a temporary method to find any missing usercopy
3899 * whitelists.
3900 */
3901 object_size = slab_ksize(s);
2d891fbc
KC
3902 if (usercopy_fallback &&
3903 offset <= object_size && n <= object_size - offset) {
afcc90f8
KC
3904 usercopy_warn("SLUB object", s->name, to_user, offset, n);
3905 return;
3906 }
ed18adc1 3907
f4e6e289 3908 usercopy_abort("SLUB object", s->name, to_user, offset, n);
ed18adc1
KC
3909}
3910#endif /* CONFIG_HARDENED_USERCOPY */
3911
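/*
 * Editorial sketch of how the usercopy region checked above gets set up:
 * s->useroffset and s->usersize come from kmem_cache_create_usercopy().
 * A hypothetical cache that only allows its name[] field to be copied
 * to or from userspace could be created as
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo_cache",
 *			sizeof(struct foo), 0, SLAB_HWCACHE_ALIGN,
 *			offsetof(struct foo, name),
 *			sizeof_field(struct foo, name), NULL);
 *
 * after which copies straying outside that window are warned about or
 * rejected by __check_heap_object().
 */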
0316bec2 3912static size_t __ksize(const void *object)
81819f0f 3913{
272c1d21 3914 struct page *page;
81819f0f 3915
ef8b4520 3916 if (unlikely(object == ZERO_SIZE_PTR))
272c1d21
CL
3917 return 0;
3918
294a80a8 3919 page = virt_to_head_page(object);
294a80a8 3920
76994412
PE
3921 if (unlikely(!PageSlab(page))) {
3922 WARN_ON(!PageCompound(page));
294a80a8 3923 return PAGE_SIZE << compound_order(page);
76994412 3924 }
81819f0f 3925
1b4f59e3 3926 return slab_ksize(page->slab_cache);
81819f0f 3927}
0316bec2
AR
3928
3929size_t ksize(const void *object)
3930{
3931 size_t size = __ksize(object);
3932 /* We assume that ksize callers could use whole allocated area,
4ebb31a4
AP
3933 * so we need to unpoison this area.
3934 */
3935 kasan_unpoison_shadow(object, size);
0316bec2
AR
3936 return size;
3937}
b1aabecd 3938EXPORT_SYMBOL(ksize);
81819f0f
CL
3939
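/*
 * Editorial note on ksize(): the usable size of a kmalloc() allocation is
 * the size of the backing cache, which may exceed the requested size, e.g.
 *
 *	p = kmalloc(30, GFP_KERNEL);
 *	n = ksize(p);	(typically 32, depending on the kmalloc buckets)
 *
 * which is why the whole area is unpoisoned for KASAN above.
 */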
3940void kfree(const void *x)
3941{
81819f0f 3942 struct page *page;
5bb983b0 3943 void *object = (void *)x;
81819f0f 3944
2121db74
PE
3945 trace_kfree(_RET_IP_, x);
3946
2408c550 3947 if (unlikely(ZERO_OR_NULL_PTR(x)))
81819f0f
CL
3948 return;
3949
b49af68f 3950 page = virt_to_head_page(x);
aadb4bc4 3951 if (unlikely(!PageSlab(page))) {
0937502a 3952 BUG_ON(!PageCompound(page));
47adccce 3953 kfree_hook(object);
4949148a 3954 __free_pages(page, compound_order(page));
aadb4bc4
CL
3955 return;
3956 }
81084651 3957 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
81819f0f
CL
3958}
3959EXPORT_SYMBOL(kfree);
3960
832f37f5
VD
3961#define SHRINK_PROMOTE_MAX 32
3962
2086d26a 3963/*
832f37f5
VD
3964 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
3965 * up most to the head of the partial lists. New allocations will then
3966 * fill those up and thus they can be removed from the partial lists.
672bba3a
CL
3967 *
3968 * The slabs with the least items are placed last. This results in them
3969	 * being allocated from last, increasing the chance that the remaining
3970	 * objects in them are freed and the slabs become empty.
2086d26a 3971 */
c9fc5864 3972int __kmem_cache_shrink(struct kmem_cache *s)
2086d26a
CL
3973{
3974 int node;
3975 int i;
3976 struct kmem_cache_node *n;
3977 struct page *page;
3978 struct page *t;
832f37f5
VD
3979 struct list_head discard;
3980 struct list_head promote[SHRINK_PROMOTE_MAX];
2086d26a 3981 unsigned long flags;
ce3712d7 3982 int ret = 0;
2086d26a 3983
2086d26a 3984 flush_all(s);
fa45dc25 3985 for_each_kmem_cache_node(s, node, n) {
832f37f5
VD
3986 INIT_LIST_HEAD(&discard);
3987 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
3988 INIT_LIST_HEAD(promote + i);
2086d26a
CL
3989
3990 spin_lock_irqsave(&n->list_lock, flags);
3991
3992 /*
832f37f5 3993 * Build lists of slabs to discard or promote.
2086d26a 3994 *
672bba3a
CL
3995 * Note that concurrent frees may occur while we hold the
3996 * list_lock. page->inuse here is the upper limit.
2086d26a
CL
3997 */
3998 list_for_each_entry_safe(page, t, &n->partial, lru) {
832f37f5
VD
3999 int free = page->objects - page->inuse;
4000
4001 /* Do not reread page->inuse */
4002 barrier();
4003
4004 /* We do not keep full slabs on the list */
4005 BUG_ON(free <= 0);
4006
4007 if (free == page->objects) {
4008 list_move(&page->lru, &discard);
69cb8e6b 4009 n->nr_partial--;
832f37f5
VD
4010 } else if (free <= SHRINK_PROMOTE_MAX)
4011 list_move(&page->lru, promote + free - 1);
2086d26a
CL
4012 }
4013
2086d26a 4014 /*
832f37f5
VD
4015 * Promote the slabs filled up most to the head of the
4016 * partial list.
2086d26a 4017 */
832f37f5
VD
4018 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
4019 list_splice(promote + i, &n->partial);
2086d26a 4020
2086d26a 4021 spin_unlock_irqrestore(&n->list_lock, flags);
69cb8e6b
CL
4022
4023 /* Release empty slabs */
832f37f5 4024 list_for_each_entry_safe(page, t, &discard, lru)
69cb8e6b 4025 discard_slab(s, page);
ce3712d7
VD
4026
4027 if (slabs_node(s, node))
4028 ret = 1;
2086d26a
CL
4029 }
4030
ce3712d7 4031 return ret;
2086d26a 4032}
2086d26a 4033
c9fc5864 4034#ifdef CONFIG_MEMCG
01fb58bc
TH
4035static void kmemcg_cache_deact_after_rcu(struct kmem_cache *s)
4036{
50862ce7
TH
4037 /*
4038 * Called with all the locks held after a sched RCU grace period.
4039 * Even if @s becomes empty after shrinking, we can't know that @s
4040 * doesn't have allocations already in-flight and thus can't
4041 * destroy @s until the associated memcg is released.
4042 *
4043 * However, let's remove the sysfs files for empty caches here.
4044 * Each cache has a lot of interface files which aren't
4045 * particularly useful for empty draining caches; otherwise, we can
4046 * easily end up with millions of unnecessary sysfs files on
4047 * systems which have a lot of memory and transient cgroups.
4048 */
4049 if (!__kmem_cache_shrink(s))
4050 sysfs_slab_remove(s);
01fb58bc
TH
4051}
4052
c9fc5864
TH
4053void __kmemcg_cache_deactivate(struct kmem_cache *s)
4054{
4055 /*
4056 * Disable empty slabs caching. Used to avoid pinning offline
4057 * memory cgroups by kmem pages that can be freed.
4058 */
e6d0e1dc 4059 slub_set_cpu_partial(s, 0);
c9fc5864
TH
4060 s->min_partial = 0;
4061
4062 /*
4063 * s->cpu_partial is checked locklessly (see put_cpu_partial), so
01fb58bc 4064 * we have to make sure the change is visible before shrinking.
c9fc5864 4065 */
01fb58bc 4066 slab_deactivate_memcg_cache_rcu_sched(s, kmemcg_cache_deact_after_rcu);
c9fc5864
TH
4067}
4068#endif
4069
b9049e23
YG
4070static int slab_mem_going_offline_callback(void *arg)
4071{
4072 struct kmem_cache *s;
4073
18004c5d 4074 mutex_lock(&slab_mutex);
b9049e23 4075 list_for_each_entry(s, &slab_caches, list)
c9fc5864 4076 __kmem_cache_shrink(s);
18004c5d 4077 mutex_unlock(&slab_mutex);
b9049e23
YG
4078
4079 return 0;
4080}
4081
4082static void slab_mem_offline_callback(void *arg)
4083{
4084 struct kmem_cache_node *n;
4085 struct kmem_cache *s;
4086 struct memory_notify *marg = arg;
4087 int offline_node;
4088
b9d5ab25 4089 offline_node = marg->status_change_nid_normal;
b9049e23
YG
4090
4091 /*
4092	 * If the node still has available memory, we still need its
4093	 * kmem_cache_node, so there is nothing to do here.
4094 */
4095 if (offline_node < 0)
4096 return;
4097
18004c5d 4098 mutex_lock(&slab_mutex);
b9049e23
YG
4099 list_for_each_entry(s, &slab_caches, list) {
4100 n = get_node(s, offline_node);
4101 if (n) {
4102 /*
4103 * if n->nr_slabs > 0, slabs still exist on the node
4104 * that is going down. We were unable to free them,
c9404c9c 4105	 * and the offline_pages() function shouldn't call this
b9049e23
YG
4106 * callback. So, we must fail.
4107 */
0f389ec6 4108 BUG_ON(slabs_node(s, offline_node));
b9049e23
YG
4109
4110 s->node[offline_node] = NULL;
8de66a0c 4111 kmem_cache_free(kmem_cache_node, n);
b9049e23
YG
4112 }
4113 }
18004c5d 4114 mutex_unlock(&slab_mutex);
b9049e23
YG
4115}
4116
4117static int slab_mem_going_online_callback(void *arg)
4118{
4119 struct kmem_cache_node *n;
4120 struct kmem_cache *s;
4121 struct memory_notify *marg = arg;
b9d5ab25 4122 int nid = marg->status_change_nid_normal;
b9049e23
YG
4123 int ret = 0;
4124
4125 /*
4126 * If the node's memory is already available, then kmem_cache_node is
4127 * already created. Nothing to do.
4128 */
4129 if (nid < 0)
4130 return 0;
4131
4132 /*
0121c619 4133 * We are bringing a node online. No memory is available yet. We must
b9049e23
YG
4134 * allocate a kmem_cache_node structure in order to bring the node
4135 * online.
4136 */
18004c5d 4137 mutex_lock(&slab_mutex);
b9049e23
YG
4138 list_for_each_entry(s, &slab_caches, list) {
4139 /*
4140	 * XXX: kmem_cache_alloc_node() will fall back to other nodes
4141	 * since memory is not yet available from the node that
4142	 * is being brought up.
4143 */
8de66a0c 4144 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
b9049e23
YG
4145 if (!n) {
4146 ret = -ENOMEM;
4147 goto out;
4148 }
4053497d 4149 init_kmem_cache_node(n);
b9049e23
YG
4150 s->node[nid] = n;
4151 }
4152out:
18004c5d 4153 mutex_unlock(&slab_mutex);
b9049e23
YG
4154 return ret;
4155}
4156
4157static int slab_memory_callback(struct notifier_block *self,
4158 unsigned long action, void *arg)
4159{
4160 int ret = 0;
4161
4162 switch (action) {
4163 case MEM_GOING_ONLINE:
4164 ret = slab_mem_going_online_callback(arg);
4165 break;
4166 case MEM_GOING_OFFLINE:
4167 ret = slab_mem_going_offline_callback(arg);
4168 break;
4169 case MEM_OFFLINE:
4170 case MEM_CANCEL_ONLINE:
4171 slab_mem_offline_callback(arg);
4172 break;
4173 case MEM_ONLINE:
4174 case MEM_CANCEL_OFFLINE:
4175 break;
4176 }
dc19f9db
KH
4177 if (ret)
4178 ret = notifier_from_errno(ret);
4179 else
4180 ret = NOTIFY_OK;
b9049e23
YG
4181 return ret;
4182}
4183
3ac38faa
AM
4184static struct notifier_block slab_memory_callback_nb = {
4185 .notifier_call = slab_memory_callback,
4186 .priority = SLAB_CALLBACK_PRI,
4187};
b9049e23 4188
81819f0f
CL
4189/********************************************************************
4190 * Basic setup of slabs
4191 *******************************************************************/
4192
51df1142
CL
4193/*
4194 * Used for early kmem_cache structures that were allocated using
dffb4d60
CL
4195 * the page allocator. Allocate them properly then fix up the pointers
4196 * that may be pointing to the wrong kmem_cache structure.
51df1142
CL
4197 */
4198
dffb4d60 4199static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
51df1142
CL
4200{
4201 int node;
dffb4d60 4202 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
fa45dc25 4203 struct kmem_cache_node *n;
51df1142 4204
dffb4d60 4205 memcpy(s, static_cache, kmem_cache->object_size);
51df1142 4206
7d557b3c
GC
4207 /*
4208 * This runs very early, and only the boot processor is supposed to be
4209 * up. Even if it weren't true, IRQs are not up so we couldn't fire
4210 * IPIs around.
4211 */
4212 __flush_cpu_slab(s, smp_processor_id());
fa45dc25 4213 for_each_kmem_cache_node(s, node, n) {
51df1142
CL
4214 struct page *p;
4215
fa45dc25
CL
4216 list_for_each_entry(p, &n->partial, lru)
4217 p->slab_cache = s;
51df1142 4218
607bf324 4219#ifdef CONFIG_SLUB_DEBUG
fa45dc25
CL
4220 list_for_each_entry(p, &n->full, lru)
4221 p->slab_cache = s;
51df1142 4222#endif
51df1142 4223 }
f7ce3190 4224 slab_init_memcg_params(s);
dffb4d60 4225 list_add(&s->list, &slab_caches);
510ded33 4226 memcg_link_cache(s);
dffb4d60 4227 return s;
51df1142
CL
4228}
4229
81819f0f
CL
4230void __init kmem_cache_init(void)
4231{
dffb4d60
CL
4232 static __initdata struct kmem_cache boot_kmem_cache,
4233 boot_kmem_cache_node;
51df1142 4234
fc8d8620
SG
4235 if (debug_guardpage_minorder())
4236 slub_max_order = 0;
4237
dffb4d60
CL
4238 kmem_cache_node = &boot_kmem_cache_node;
4239 kmem_cache = &boot_kmem_cache;
51df1142 4240
dffb4d60 4241 create_boot_cache(kmem_cache_node, "kmem_cache_node",
8eb8284b 4242 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
b9049e23 4243
3ac38faa 4244 register_hotmemory_notifier(&slab_memory_callback_nb);
81819f0f
CL
4245
4246 /* Able to allocate the per node structures */
4247 slab_state = PARTIAL;
4248
dffb4d60
CL
4249 create_boot_cache(kmem_cache, "kmem_cache",
4250 offsetof(struct kmem_cache, node) +
4251 nr_node_ids * sizeof(struct kmem_cache_node *),
8eb8284b 4252 SLAB_HWCACHE_ALIGN, 0, 0);
8a13a4cc 4253
dffb4d60 4254 kmem_cache = bootstrap(&boot_kmem_cache);
dffb4d60 4255 kmem_cache_node = bootstrap(&boot_kmem_cache_node);
51df1142
CL
4256
4257 /* Now we can use the kmem_cache to allocate kmalloc slabs */
34cc6990 4258 setup_kmalloc_cache_index_table();
f97d5f63 4259 create_kmalloc_caches(0);
81819f0f 4260
210e7a43
TG
4261 /* Setup random freelists for each cache */
4262 init_freelist_randomization();
4263
a96a87bf
SAS
4264 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4265 slub_cpu_dead);
81819f0f 4266
19af27af 4267 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n",
f97d5f63 4268 cache_line_size(),
81819f0f
CL
4269 slub_min_order, slub_max_order, slub_min_objects,
4270 nr_cpu_ids, nr_node_ids);
4271}
4272
7e85ee0c
PE
4273void __init kmem_cache_init_late(void)
4274{
7e85ee0c
PE
4275}
4276
2633d7a0 4277struct kmem_cache *
f4957d5b 4278__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
d50112ed 4279 slab_flags_t flags, void (*ctor)(void *))
81819f0f 4280{
426589f5 4281 struct kmem_cache *s, *c;
81819f0f 4282
a44cb944 4283 s = find_mergeable(size, align, flags, name, ctor);
81819f0f
CL
4284 if (s) {
4285 s->refcount++;
84d0ddd6 4286
81819f0f
CL
4287 /*
4288 * Adjust the object sizes so that we clear
4289 * the complete object on kzalloc.
4290 */
1b473f29 4291 s->object_size = max(s->object_size, size);
52ee6d74 4292 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
6446faa2 4293
426589f5 4294 for_each_memcg_cache(c, s) {
84d0ddd6 4295 c->object_size = s->object_size;
52ee6d74 4296 c->inuse = max(c->inuse, ALIGN(size, sizeof(void *)));
84d0ddd6
VD
4297 }
4298
7b8f3b66 4299 if (sysfs_slab_alias(s, name)) {
7b8f3b66 4300 s->refcount--;
cbb79694 4301 s = NULL;
7b8f3b66 4302 }
a0e1d1be 4303 }
6446faa2 4304
cbb79694
CL
4305 return s;
4306}
84c1cf62 4307
d50112ed 4308int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
cbb79694 4309{
aac3a166
PE
4310 int err;
4311
4312 err = kmem_cache_open(s, flags);
4313 if (err)
4314 return err;
20cea968 4315
45530c44
CL
4316 /* Mutex is not taken during early boot */
4317 if (slab_state <= UP)
4318 return 0;
4319
107dab5c 4320 memcg_propagate_slab_attrs(s);
aac3a166 4321 err = sysfs_slab_add(s);
aac3a166 4322 if (err)
52b4b950 4323 __kmem_cache_release(s);
20cea968 4324
aac3a166 4325 return err;
81819f0f 4326}
81819f0f 4327
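/*
 * Editorial usage sketch: __kmem_cache_alias() and __kmem_cache_create()
 * above are reached through the generic kmem_cache_create() entry point in
 * mm/slab_common.c. A hypothetical user looks like
 *
 *	cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *				   SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(cachep, obj);
 *	kmem_cache_destroy(cachep);
 *
 * Compatible caches may be merged rather than created anew, which is what
 * the refcount handling in __kmem_cache_alias() supports.
 */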
ce71e27c 4328void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
81819f0f 4329{
aadb4bc4 4330 struct kmem_cache *s;
94b528d0 4331 void *ret;
aadb4bc4 4332
95a05b42 4333 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
eada35ef
PE
4334 return kmalloc_large(size, gfpflags);
4335
2c59dd65 4336 s = kmalloc_slab(size, gfpflags);
81819f0f 4337
2408c550 4338 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 4339 return s;
81819f0f 4340
2b847c3c 4341 ret = slab_alloc(s, gfpflags, caller);
94b528d0 4342
25985edc 4343 /* Honor the call site pointer we received. */
ca2b84cb 4344 trace_kmalloc(caller, ret, size, s->size, gfpflags);
94b528d0
EGM
4345
4346 return ret;
81819f0f
CL
4347}
4348
5d1f57e4 4349#ifdef CONFIG_NUMA
81819f0f 4350void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
ce71e27c 4351 int node, unsigned long caller)
81819f0f 4352{
aadb4bc4 4353 struct kmem_cache *s;
94b528d0 4354 void *ret;
aadb4bc4 4355
95a05b42 4356 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
d3e14aa3
XF
4357 ret = kmalloc_large_node(size, gfpflags, node);
4358
4359 trace_kmalloc_node(caller, ret,
4360 size, PAGE_SIZE << get_order(size),
4361 gfpflags, node);
4362
4363 return ret;
4364 }
eada35ef 4365
2c59dd65 4366 s = kmalloc_slab(size, gfpflags);
81819f0f 4367
2408c550 4368 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 4369 return s;
81819f0f 4370
2b847c3c 4371 ret = slab_alloc_node(s, gfpflags, node, caller);
94b528d0 4372
25985edc 4373 /* Honor the call site pointer we received. */
ca2b84cb 4374 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
94b528d0
EGM
4375
4376 return ret;
81819f0f 4377}
5d1f57e4 4378#endif
81819f0f 4379
ab4d5ed5 4380#ifdef CONFIG_SYSFS
205ab99d
CL
4381static int count_inuse(struct page *page)
4382{
4383 return page->inuse;
4384}
4385
4386static int count_total(struct page *page)
4387{
4388 return page->objects;
4389}
ab4d5ed5 4390#endif
205ab99d 4391
ab4d5ed5 4392#ifdef CONFIG_SLUB_DEBUG
434e245d
CL
4393static int validate_slab(struct kmem_cache *s, struct page *page,
4394 unsigned long *map)
53e15af0
CL
4395{
4396 void *p;
a973e9dd 4397 void *addr = page_address(page);
53e15af0
CL
4398
4399 if (!check_slab(s, page) ||
4400 !on_freelist(s, page, NULL))
4401 return 0;
4402
4403 /* Now we know that a valid freelist exists */
39b26464 4404 bitmap_zero(map, page->objects);
53e15af0 4405
5f80b13a
CL
4406 get_map(s, page, map);
4407 for_each_object(p, s, addr, page->objects) {
4408 if (test_bit(slab_index(p, s, addr), map))
4409 if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4410 return 0;
53e15af0
CL
4411 }
4412
224a88be 4413 for_each_object(p, s, addr, page->objects)
7656c72b 4414 if (!test_bit(slab_index(p, s, addr), map))
37d57443 4415 if (!check_object(s, page, p, SLUB_RED_ACTIVE))
53e15af0
CL
4416 return 0;
4417 return 1;
4418}
4419
434e245d
CL
4420static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4421 unsigned long *map)
53e15af0 4422{
881db7fb
CL
4423 slab_lock(page);
4424 validate_slab(s, page, map);
4425 slab_unlock(page);
53e15af0
CL
4426}
4427
434e245d
CL
4428static int validate_slab_node(struct kmem_cache *s,
4429 struct kmem_cache_node *n, unsigned long *map)
53e15af0
CL
4430{
4431 unsigned long count = 0;
4432 struct page *page;
4433 unsigned long flags;
4434
4435 spin_lock_irqsave(&n->list_lock, flags);
4436
4437 list_for_each_entry(page, &n->partial, lru) {
434e245d 4438 validate_slab_slab(s, page, map);
53e15af0
CL
4439 count++;
4440 }
4441 if (count != n->nr_partial)
f9f58285
FF
4442 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4443 s->name, count, n->nr_partial);
53e15af0
CL
4444
4445 if (!(s->flags & SLAB_STORE_USER))
4446 goto out;
4447
4448 list_for_each_entry(page, &n->full, lru) {
434e245d 4449 validate_slab_slab(s, page, map);
53e15af0
CL
4450 count++;
4451 }
4452 if (count != atomic_long_read(&n->nr_slabs))
f9f58285
FF
4453 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4454 s->name, count, atomic_long_read(&n->nr_slabs));
53e15af0
CL
4455
4456out:
4457 spin_unlock_irqrestore(&n->list_lock, flags);
4458 return count;
4459}
4460
434e245d 4461static long validate_slab_cache(struct kmem_cache *s)
53e15af0
CL
4462{
4463 int node;
4464 unsigned long count = 0;
fa45dc25 4465 struct kmem_cache_node *n;
0684e652 4466 unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
434e245d
CL
4467
4468 if (!map)
4469 return -ENOMEM;
53e15af0
CL
4470
4471 flush_all(s);
fa45dc25 4472 for_each_kmem_cache_node(s, node, n)
434e245d 4473 count += validate_slab_node(s, n, map);
0684e652 4474 bitmap_free(map);
53e15af0
CL
4475 return count;
4476}
88a420e4 4477/*
672bba3a 4478 * Generate lists of code addresses where slabcache objects are allocated
88a420e4
CL
4479 * and freed.
4480 */
4481
4482struct location {
4483 unsigned long count;
ce71e27c 4484 unsigned long addr;
45edfa58
CL
4485 long long sum_time;
4486 long min_time;
4487 long max_time;
4488 long min_pid;
4489 long max_pid;
174596a0 4490 DECLARE_BITMAP(cpus, NR_CPUS);
45edfa58 4491 nodemask_t nodes;
88a420e4
CL
4492};
4493
4494struct loc_track {
4495 unsigned long max;
4496 unsigned long count;
4497 struct location *loc;
4498};
4499
4500static void free_loc_track(struct loc_track *t)
4501{
4502 if (t->max)
4503 free_pages((unsigned long)t->loc,
4504 get_order(sizeof(struct location) * t->max));
4505}
4506
68dff6a9 4507static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
88a420e4
CL
4508{
4509 struct location *l;
4510 int order;
4511
88a420e4
CL
4512 order = get_order(sizeof(struct location) * max);
4513
68dff6a9 4514 l = (void *)__get_free_pages(flags, order);
88a420e4
CL
4515 if (!l)
4516 return 0;
4517
4518 if (t->count) {
4519 memcpy(l, t->loc, sizeof(struct location) * t->count);
4520 free_loc_track(t);
4521 }
4522 t->max = max;
4523 t->loc = l;
4524 return 1;
4525}
4526
4527static int add_location(struct loc_track *t, struct kmem_cache *s,
45edfa58 4528 const struct track *track)
88a420e4
CL
4529{
4530 long start, end, pos;
4531 struct location *l;
ce71e27c 4532 unsigned long caddr;
45edfa58 4533 unsigned long age = jiffies - track->when;
88a420e4
CL
4534
4535 start = -1;
4536 end = t->count;
4537
4538 for ( ; ; ) {
4539 pos = start + (end - start + 1) / 2;
4540
4541 /*
4542 * There is nothing at "end". If we end up there
4543	 * we need to insert something before end.
4544 */
4545 if (pos == end)
4546 break;
4547
4548 caddr = t->loc[pos].addr;
45edfa58
CL
4549 if (track->addr == caddr) {
4550
4551 l = &t->loc[pos];
4552 l->count++;
4553 if (track->when) {
4554 l->sum_time += age;
4555 if (age < l->min_time)
4556 l->min_time = age;
4557 if (age > l->max_time)
4558 l->max_time = age;
4559
4560 if (track->pid < l->min_pid)
4561 l->min_pid = track->pid;
4562 if (track->pid > l->max_pid)
4563 l->max_pid = track->pid;
4564
174596a0
RR
4565 cpumask_set_cpu(track->cpu,
4566 to_cpumask(l->cpus));
45edfa58
CL
4567 }
4568 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
4569 return 1;
4570 }
4571
45edfa58 4572 if (track->addr < caddr)
88a420e4
CL
4573 end = pos;
4574 else
4575 start = pos;
4576 }
4577
4578 /*
672bba3a 4579 * Not found. Insert new tracking element.
88a420e4 4580 */
68dff6a9 4581 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
88a420e4
CL
4582 return 0;
4583
4584 l = t->loc + pos;
4585 if (pos < t->count)
4586 memmove(l + 1, l,
4587 (t->count - pos) * sizeof(struct location));
4588 t->count++;
4589 l->count = 1;
45edfa58
CL
4590 l->addr = track->addr;
4591 l->sum_time = age;
4592 l->min_time = age;
4593 l->max_time = age;
4594 l->min_pid = track->pid;
4595 l->max_pid = track->pid;
174596a0
RR
4596 cpumask_clear(to_cpumask(l->cpus));
4597 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
45edfa58
CL
4598 nodes_clear(l->nodes);
4599 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
4600 return 1;
4601}
4602
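/*
 * Editorial note on the search above: t->loc[] is kept sorted by call site
 * address, so an existing entry is found (and its statistics updated) by
 * binary search, while a new call site is inserted at the point where the
 * search stopped, e.g. an address falling between t->loc[2].addr and
 * t->loc[3].addr is memmove()d into slot 3.
 */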
4603static void process_slab(struct loc_track *t, struct kmem_cache *s,
bbd7d57b 4604 struct page *page, enum track_item alloc,
a5dd5c11 4605 unsigned long *map)
88a420e4 4606{
a973e9dd 4607 void *addr = page_address(page);
88a420e4
CL
4608 void *p;
4609
39b26464 4610 bitmap_zero(map, page->objects);
5f80b13a 4611 get_map(s, page, map);
88a420e4 4612
224a88be 4613 for_each_object(p, s, addr, page->objects)
45edfa58
CL
4614 if (!test_bit(slab_index(p, s, addr), map))
4615 add_location(t, s, get_track(s, p, alloc));
88a420e4
CL
4616}
4617
4618static int list_locations(struct kmem_cache *s, char *buf,
4619 enum track_item alloc)
4620{
e374d483 4621 int len = 0;
88a420e4 4622 unsigned long i;
68dff6a9 4623 struct loc_track t = { 0, 0, NULL };
88a420e4 4624 int node;
fa45dc25 4625 struct kmem_cache_node *n;
0684e652 4626 unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
88a420e4 4627
bbd7d57b 4628 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
0ee931c4 4629 GFP_KERNEL)) {
0684e652 4630 bitmap_free(map);
68dff6a9 4631 return sprintf(buf, "Out of memory\n");
bbd7d57b 4632 }
88a420e4
CL
4633 /* Push back cpu slabs */
4634 flush_all(s);
4635
fa45dc25 4636 for_each_kmem_cache_node(s, node, n) {
88a420e4
CL
4637 unsigned long flags;
4638 struct page *page;
4639
9e86943b 4640 if (!atomic_long_read(&n->nr_slabs))
88a420e4
CL
4641 continue;
4642
4643 spin_lock_irqsave(&n->list_lock, flags);
4644 list_for_each_entry(page, &n->partial, lru)
bbd7d57b 4645 process_slab(&t, s, page, alloc, map);
88a420e4 4646 list_for_each_entry(page, &n->full, lru)
bbd7d57b 4647 process_slab(&t, s, page, alloc, map);
88a420e4
CL
4648 spin_unlock_irqrestore(&n->list_lock, flags);
4649 }
4650
4651 for (i = 0; i < t.count; i++) {
45edfa58 4652 struct location *l = &t.loc[i];
88a420e4 4653
9c246247 4654 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
88a420e4 4655 break;
e374d483 4656 len += sprintf(buf + len, "%7ld ", l->count);
45edfa58
CL
4657
4658 if (l->addr)
62c70bce 4659 len += sprintf(buf + len, "%pS", (void *)l->addr);
88a420e4 4660 else
e374d483 4661 len += sprintf(buf + len, "<not-available>");
45edfa58
CL
4662
4663 if (l->sum_time != l->min_time) {
e374d483 4664 len += sprintf(buf + len, " age=%ld/%ld/%ld",
f8bd2258
RZ
4665 l->min_time,
4666 (long)div_u64(l->sum_time, l->count),
4667 l->max_time);
45edfa58 4668 } else
e374d483 4669 len += sprintf(buf + len, " age=%ld",
45edfa58
CL
4670 l->min_time);
4671
4672 if (l->min_pid != l->max_pid)
e374d483 4673 len += sprintf(buf + len, " pid=%ld-%ld",
45edfa58
CL
4674 l->min_pid, l->max_pid);
4675 else
e374d483 4676 len += sprintf(buf + len, " pid=%ld",
45edfa58
CL
4677 l->min_pid);
4678
174596a0
RR
4679 if (num_online_cpus() > 1 &&
4680 !cpumask_empty(to_cpumask(l->cpus)) &&
5024c1d7
TH
4681 len < PAGE_SIZE - 60)
4682 len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4683 " cpus=%*pbl",
4684 cpumask_pr_args(to_cpumask(l->cpus)));
45edfa58 4685
62bc62a8 4686 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
5024c1d7
TH
4687 len < PAGE_SIZE - 60)
4688 len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4689 " nodes=%*pbl",
4690 nodemask_pr_args(&l->nodes));
45edfa58 4691
e374d483 4692 len += sprintf(buf + len, "\n");
88a420e4
CL
4693 }
4694
4695 free_loc_track(&t);
0684e652 4696 bitmap_free(map);
88a420e4 4697 if (!t.count)
e374d483
HH
4698 len += sprintf(buf, "No data\n");
4699 return len;
88a420e4 4700}
ab4d5ed5 4701#endif
88a420e4 4702
a5a84755 4703#ifdef SLUB_RESILIENCY_TEST
c07b8183 4704static void __init resiliency_test(void)
a5a84755
CL
4705{
4706 u8 *p;
cc252eae 4707 int type = KMALLOC_NORMAL;
a5a84755 4708
95a05b42 4709 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
a5a84755 4710
f9f58285
FF
4711 pr_err("SLUB resiliency testing\n");
4712 pr_err("-----------------------\n");
4713 pr_err("A. Corruption after allocation\n");
a5a84755
CL
4714
4715 p = kzalloc(16, GFP_KERNEL);
4716 p[16] = 0x12;
f9f58285
FF
4717 pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
4718 p + 16);
a5a84755 4719
cc252eae 4720 validate_slab_cache(kmalloc_caches[type][4]);
a5a84755
CL
4721
4722 /* Hmmm... The next two are dangerous */
4723 p = kzalloc(32, GFP_KERNEL);
4724 p[32 + sizeof(void *)] = 0x34;
f9f58285
FF
4725 pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
4726 p);
4727 pr_err("If allocated object is overwritten then not detectable\n\n");
a5a84755 4728
cc252eae 4729 validate_slab_cache(kmalloc_caches[type][5]);
a5a84755
CL
4730 p = kzalloc(64, GFP_KERNEL);
4731 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4732 *p = 0x56;
f9f58285
FF
4733 pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4734 p);
4735 pr_err("If allocated object is overwritten then not detectable\n\n");
cc252eae 4736 validate_slab_cache(kmalloc_caches[type][6]);
a5a84755 4737
f9f58285 4738 pr_err("\nB. Corruption after free\n");
a5a84755
CL
4739 p = kzalloc(128, GFP_KERNEL);
4740 kfree(p);
4741 *p = 0x78;
f9f58285 4742 pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
cc252eae 4743 validate_slab_cache(kmalloc_caches[type][7]);
a5a84755
CL
4744
4745 p = kzalloc(256, GFP_KERNEL);
4746 kfree(p);
4747 p[50] = 0x9a;
f9f58285 4748 pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
cc252eae 4749 validate_slab_cache(kmalloc_caches[type][8]);
a5a84755
CL
4750
4751 p = kzalloc(512, GFP_KERNEL);
4752 kfree(p);
4753 p[512] = 0xab;
f9f58285 4754 pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
cc252eae 4755 validate_slab_cache(kmalloc_caches[type][9]);
a5a84755
CL
4756}
4757#else
4758#ifdef CONFIG_SYSFS
4759static void resiliency_test(void) {};
4760#endif
4761#endif
4762
ab4d5ed5 4763#ifdef CONFIG_SYSFS
81819f0f 4764enum slab_stat_type {
205ab99d
CL
4765 SL_ALL, /* All slabs */
4766 SL_PARTIAL, /* Only partially allocated slabs */
4767 SL_CPU, /* Only slabs used for cpu caches */
4768 SL_OBJECTS, /* Determine allocated objects not slabs */
4769 SL_TOTAL /* Determine object capacity not slabs */
81819f0f
CL
4770};
4771
205ab99d 4772#define SO_ALL (1 << SL_ALL)
81819f0f
CL
4773#define SO_PARTIAL (1 << SL_PARTIAL)
4774#define SO_CPU (1 << SL_CPU)
4775#define SO_OBJECTS (1 << SL_OBJECTS)
205ab99d 4776#define SO_TOTAL (1 << SL_TOTAL)
81819f0f 4777
1663f26d
TH
4778#ifdef CONFIG_MEMCG
4779static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
4780
4781static int __init setup_slub_memcg_sysfs(char *str)
4782{
4783 int v;
4784
4785 if (get_option(&str, &v) > 0)
4786 memcg_sysfs_enabled = v;
4787
4788 return 1;
4789}
4790
4791__setup("slub_memcg_sysfs=", setup_slub_memcg_sysfs);
4792#endif
4793
62e5c4b4
CG
4794static ssize_t show_slab_objects(struct kmem_cache *s,
4795 char *buf, unsigned long flags)
81819f0f
CL
4796{
4797 unsigned long total = 0;
81819f0f
CL
4798 int node;
4799 int x;
4800 unsigned long *nodes;
81819f0f 4801
6396bb22 4802 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
62e5c4b4
CG
4803 if (!nodes)
4804 return -ENOMEM;
81819f0f 4805
205ab99d
CL
4806 if (flags & SO_CPU) {
4807 int cpu;
81819f0f 4808
205ab99d 4809 for_each_possible_cpu(cpu) {
d0e0ac97
CG
4810 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4811 cpu);
ec3ab083 4812 int node;
49e22585 4813 struct page *page;
dfb4f096 4814
4db0c3c2 4815 page = READ_ONCE(c->page);
ec3ab083
CL
4816 if (!page)
4817 continue;
205ab99d 4818
ec3ab083
CL
4819 node = page_to_nid(page);
4820 if (flags & SO_TOTAL)
4821 x = page->objects;
4822 else if (flags & SO_OBJECTS)
4823 x = page->inuse;
4824 else
4825 x = 1;
49e22585 4826
ec3ab083
CL
4827 total += x;
4828 nodes[node] += x;
4829
a93cf07b 4830 page = slub_percpu_partial_read_once(c);
49e22585 4831 if (page) {
8afb1474
LZ
4832 node = page_to_nid(page);
4833 if (flags & SO_TOTAL)
4834 WARN_ON_ONCE(1);
4835 else if (flags & SO_OBJECTS)
4836 WARN_ON_ONCE(1);
4837 else
4838 x = page->pages;
bc6697d8
ED
4839 total += x;
4840 nodes[node] += x;
49e22585 4841 }
81819f0f
CL
4842 }
4843 }
4844
bfc8c901 4845 get_online_mems();
ab4d5ed5 4846#ifdef CONFIG_SLUB_DEBUG
205ab99d 4847 if (flags & SO_ALL) {
fa45dc25
CL
4848 struct kmem_cache_node *n;
4849
4850 for_each_kmem_cache_node(s, node, n) {
205ab99d 4851
d0e0ac97
CG
4852 if (flags & SO_TOTAL)
4853 x = atomic_long_read(&n->total_objects);
4854 else if (flags & SO_OBJECTS)
4855 x = atomic_long_read(&n->total_objects) -
4856 count_partial(n, count_free);
81819f0f 4857 else
205ab99d 4858 x = atomic_long_read(&n->nr_slabs);
81819f0f
CL
4859 total += x;
4860 nodes[node] += x;
4861 }
4862
ab4d5ed5
CL
4863 } else
4864#endif
4865 if (flags & SO_PARTIAL) {
fa45dc25 4866 struct kmem_cache_node *n;
81819f0f 4867
fa45dc25 4868 for_each_kmem_cache_node(s, node, n) {
205ab99d
CL
4869 if (flags & SO_TOTAL)
4870 x = count_partial(n, count_total);
4871 else if (flags & SO_OBJECTS)
4872 x = count_partial(n, count_inuse);
81819f0f 4873 else
205ab99d 4874 x = n->nr_partial;
81819f0f
CL
4875 total += x;
4876 nodes[node] += x;
4877 }
4878 }
81819f0f
CL
4879 x = sprintf(buf, "%lu", total);
4880#ifdef CONFIG_NUMA
fa45dc25 4881 for (node = 0; node < nr_node_ids; node++)
81819f0f
CL
4882 if (nodes[node])
4883 x += sprintf(buf + x, " N%d=%lu",
4884 node, nodes[node]);
4885#endif
bfc8c901 4886 put_online_mems();
81819f0f
CL
4887 kfree(nodes);
4888 return x + sprintf(buf + x, "\n");
4889}
4890
ab4d5ed5 4891#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
4892static int any_slab_objects(struct kmem_cache *s)
4893{
4894 int node;
fa45dc25 4895 struct kmem_cache_node *n;
81819f0f 4896
fa45dc25 4897 for_each_kmem_cache_node(s, node, n)
4ea33e2d 4898 if (atomic_long_read(&n->total_objects))
81819f0f 4899 return 1;
fa45dc25 4900
81819f0f
CL
4901 return 0;
4902}
ab4d5ed5 4903#endif
81819f0f
CL
4904
4905#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
497888cf 4906#define to_slab(n) container_of(n, struct kmem_cache, kobj)
81819f0f
CL
4907
4908struct slab_attribute {
4909 struct attribute attr;
4910 ssize_t (*show)(struct kmem_cache *s, char *buf);
4911 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4912};
4913
4914#define SLAB_ATTR_RO(_name) \
ab067e99
VK
4915 static struct slab_attribute _name##_attr = \
4916 __ATTR(_name, 0400, _name##_show, NULL)
81819f0f
CL
4917
4918#define SLAB_ATTR(_name) \
4919 static struct slab_attribute _name##_attr = \
ab067e99 4920 __ATTR(_name, 0600, _name##_show, _name##_store)
81819f0f 4921
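/*
 * Editorial note: each SLAB_ATTR()/SLAB_ATTR_RO() use below expands to a
 * struct slab_attribute bound to the matching _show/_store helpers, e.g.
 *
 *	SLAB_ATTR(order);
 *
 * produces a 0600 "order" attribute backed by order_show()/order_store(),
 * visible to userspace as /sys/kernel/slab/<cache>/order.
 */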
81819f0f
CL
4922static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4923{
44065b2e 4924 return sprintf(buf, "%u\n", s->size);
81819f0f
CL
4925}
4926SLAB_ATTR_RO(slab_size);
4927
4928static ssize_t align_show(struct kmem_cache *s, char *buf)
4929{
3a3791ec 4930 return sprintf(buf, "%u\n", s->align);
81819f0f
CL
4931}
4932SLAB_ATTR_RO(align);
4933
4934static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4935{
1b473f29 4936 return sprintf(buf, "%u\n", s->object_size);
81819f0f
CL
4937}
4938SLAB_ATTR_RO(object_size);
4939
4940static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4941{
19af27af 4942 return sprintf(buf, "%u\n", oo_objects(s->oo));
81819f0f
CL
4943}
4944SLAB_ATTR_RO(objs_per_slab);
4945
06b285dc
CL
4946static ssize_t order_store(struct kmem_cache *s,
4947 const char *buf, size_t length)
4948{
19af27af 4949 unsigned int order;
0121c619
CL
4950 int err;
4951
19af27af 4952 err = kstrtouint(buf, 10, &order);
0121c619
CL
4953 if (err)
4954 return err;
06b285dc
CL
4955
4956 if (order > slub_max_order || order < slub_min_order)
4957 return -EINVAL;
4958
4959 calculate_sizes(s, order);
4960 return length;
4961}
4962
81819f0f
CL
4963static ssize_t order_show(struct kmem_cache *s, char *buf)
4964{
19af27af 4965 return sprintf(buf, "%u\n", oo_order(s->oo));
81819f0f 4966}
06b285dc 4967SLAB_ATTR(order);
81819f0f 4968
73d342b1
DR
4969static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4970{
4971 return sprintf(buf, "%lu\n", s->min_partial);
4972}
4973
4974static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4975 size_t length)
4976{
4977 unsigned long min;
4978 int err;
4979
3dbb95f7 4980 err = kstrtoul(buf, 10, &min);
73d342b1
DR
4981 if (err)
4982 return err;
4983
c0bdb232 4984 set_min_partial(s, min);
73d342b1
DR
4985 return length;
4986}
4987SLAB_ATTR(min_partial);
4988
49e22585
CL
4989static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4990{
e6d0e1dc 4991 return sprintf(buf, "%u\n", slub_cpu_partial(s));
49e22585
CL
4992}
4993
4994static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4995 size_t length)
4996{
e5d9998f 4997 unsigned int objects;
49e22585
CL
4998 int err;
4999
e5d9998f 5000 err = kstrtouint(buf, 10, &objects);
49e22585
CL
5001 if (err)
5002 return err;
345c905d 5003 if (objects && !kmem_cache_has_cpu_partial(s))
74ee4ef1 5004 return -EINVAL;
49e22585 5005
e6d0e1dc 5006 slub_set_cpu_partial(s, objects);
49e22585
CL
5007 flush_all(s);
5008 return length;
5009}
5010SLAB_ATTR(cpu_partial);
5011
81819f0f
CL
5012static ssize_t ctor_show(struct kmem_cache *s, char *buf)
5013{
62c70bce
JP
5014 if (!s->ctor)
5015 return 0;
5016 return sprintf(buf, "%pS\n", s->ctor);
81819f0f
CL
5017}
5018SLAB_ATTR_RO(ctor);
5019
81819f0f
CL
5020static ssize_t aliases_show(struct kmem_cache *s, char *buf)
5021{
4307c14f 5022 return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
81819f0f
CL
5023}
5024SLAB_ATTR_RO(aliases);
5025
81819f0f
CL
5026static ssize_t partial_show(struct kmem_cache *s, char *buf)
5027{
d9acf4b7 5028 return show_slab_objects(s, buf, SO_PARTIAL);
81819f0f
CL
5029}
5030SLAB_ATTR_RO(partial);
5031
5032static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
5033{
d9acf4b7 5034 return show_slab_objects(s, buf, SO_CPU);
81819f0f
CL
5035}
5036SLAB_ATTR_RO(cpu_slabs);
5037
5038static ssize_t objects_show(struct kmem_cache *s, char *buf)
5039{
205ab99d 5040 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
81819f0f
CL
5041}
5042SLAB_ATTR_RO(objects);
5043
205ab99d
CL
5044static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
5045{
5046 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
5047}
5048SLAB_ATTR_RO(objects_partial);
5049
49e22585
CL
5050static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
5051{
5052 int objects = 0;
5053 int pages = 0;
5054 int cpu;
5055 int len;
5056
5057 for_each_online_cpu(cpu) {
a93cf07b
WY
5058 struct page *page;
5059
5060 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
49e22585
CL
5061
5062 if (page) {
5063 pages += page->pages;
5064 objects += page->pobjects;
5065 }
5066 }
5067
5068 len = sprintf(buf, "%d(%d)", objects, pages);
5069
5070#ifdef CONFIG_SMP
5071 for_each_online_cpu(cpu) {
a93cf07b
WY
5072 struct page *page;
5073
5074 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
49e22585
CL
5075
5076 if (page && len < PAGE_SIZE - 20)
5077 len += sprintf(buf + len, " C%d=%d(%d)", cpu,
5078 page->pobjects, page->pages);
5079 }
5080#endif
5081 return len + sprintf(buf + len, "\n");
5082}
5083SLAB_ATTR_RO(slabs_cpu_partial);
5084
a5a84755
CL
5085static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
5086{
5087 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
5088}
5089
5090static ssize_t reclaim_account_store(struct kmem_cache *s,
5091 const char *buf, size_t length)
5092{
5093 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
5094 if (buf[0] == '1')
5095 s->flags |= SLAB_RECLAIM_ACCOUNT;
5096 return length;
5097}
5098SLAB_ATTR(reclaim_account);
5099
5100static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
5101{
5102 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
5103}
5104SLAB_ATTR_RO(hwcache_align);
5105
5106#ifdef CONFIG_ZONE_DMA
5107static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
5108{
5109 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
5110}
5111SLAB_ATTR_RO(cache_dma);
5112#endif
5113
8eb8284b
DW
5114static ssize_t usersize_show(struct kmem_cache *s, char *buf)
5115{
7bbdb81e 5116 return sprintf(buf, "%u\n", s->usersize);
8eb8284b
DW
5117}
5118SLAB_ATTR_RO(usersize);
5119
a5a84755
CL
5120static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
5121{
5f0d5a3a 5122 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
a5a84755
CL
5123}
5124SLAB_ATTR_RO(destroy_by_rcu);
5125
ab4d5ed5 5126#ifdef CONFIG_SLUB_DEBUG
a5a84755
CL
5127static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5128{
5129 return show_slab_objects(s, buf, SO_ALL);
5130}
5131SLAB_ATTR_RO(slabs);
5132
205ab99d
CL
5133static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5134{
5135 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5136}
5137SLAB_ATTR_RO(total_objects);
5138
81819f0f
CL
5139static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5140{
becfda68 5141 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
81819f0f
CL
5142}
5143
5144static ssize_t sanity_checks_store(struct kmem_cache *s,
5145 const char *buf, size_t length)
5146{
becfda68 5147 s->flags &= ~SLAB_CONSISTENCY_CHECKS;
b789ef51
CL
5148 if (buf[0] == '1') {
5149 s->flags &= ~__CMPXCHG_DOUBLE;
becfda68 5150 s->flags |= SLAB_CONSISTENCY_CHECKS;
b789ef51 5151 }
81819f0f
CL
5152 return length;
5153}
5154SLAB_ATTR(sanity_checks);
5155
5156static ssize_t trace_show(struct kmem_cache *s, char *buf)
5157{
5158 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
5159}
5160
5161static ssize_t trace_store(struct kmem_cache *s, const char *buf,
5162 size_t length)
5163{
c9e16131
CL
5164 /*
5165 * Tracing a merged cache is going to give confusing results
5166 * as well as cause other issues like converting a mergeable
5167	 * cache into an unmergeable one.
5168 */
5169 if (s->refcount > 1)
5170 return -EINVAL;
5171
81819f0f 5172 s->flags &= ~SLAB_TRACE;
b789ef51
CL
5173 if (buf[0] == '1') {
5174 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 5175 s->flags |= SLAB_TRACE;
b789ef51 5176 }
81819f0f
CL
5177 return length;
5178}
5179SLAB_ATTR(trace);
5180
81819f0f
CL
5181static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
5182{
5183 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
5184}
5185
5186static ssize_t red_zone_store(struct kmem_cache *s,
5187 const char *buf, size_t length)
5188{
5189 if (any_slab_objects(s))
5190 return -EBUSY;
5191
5192 s->flags &= ~SLAB_RED_ZONE;
b789ef51 5193 if (buf[0] == '1') {
81819f0f 5194 s->flags |= SLAB_RED_ZONE;
b789ef51 5195 }
06b285dc 5196 calculate_sizes(s, -1);
81819f0f
CL
5197 return length;
5198}
5199SLAB_ATTR(red_zone);
5200
5201static ssize_t poison_show(struct kmem_cache *s, char *buf)
5202{
5203 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
5204}
5205
5206static ssize_t poison_store(struct kmem_cache *s,
5207 const char *buf, size_t length)
5208{
5209 if (any_slab_objects(s))
5210 return -EBUSY;
5211
5212 s->flags &= ~SLAB_POISON;
b789ef51 5213 if (buf[0] == '1') {
81819f0f 5214 s->flags |= SLAB_POISON;
b789ef51 5215 }
06b285dc 5216 calculate_sizes(s, -1);
81819f0f
CL
5217 return length;
5218}
5219SLAB_ATTR(poison);
5220
5221static ssize_t store_user_show(struct kmem_cache *s, char *buf)
5222{
5223 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
5224}
5225
5226static ssize_t store_user_store(struct kmem_cache *s,
5227 const char *buf, size_t length)
5228{
5229 if (any_slab_objects(s))
5230 return -EBUSY;
5231
5232 s->flags &= ~SLAB_STORE_USER;
b789ef51
CL
5233 if (buf[0] == '1') {
5234 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 5235 s->flags |= SLAB_STORE_USER;
b789ef51 5236 }
06b285dc 5237 calculate_sizes(s, -1);
81819f0f
CL
5238 return length;
5239}
5240SLAB_ATTR(store_user);
5241
53e15af0
CL
5242static ssize_t validate_show(struct kmem_cache *s, char *buf)
5243{
5244 return 0;
5245}
5246
5247static ssize_t validate_store(struct kmem_cache *s,
5248 const char *buf, size_t length)
5249{
434e245d
CL
5250 int ret = -EINVAL;
5251
5252 if (buf[0] == '1') {
5253 ret = validate_slab_cache(s);
5254 if (ret >= 0)
5255 ret = length;
5256 }
5257 return ret;
53e15af0
CL
5258}
5259SLAB_ATTR(validate);
a5a84755
CL
5260
5261static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
5262{
5263 if (!(s->flags & SLAB_STORE_USER))
5264 return -ENOSYS;
5265 return list_locations(s, buf, TRACK_ALLOC);
5266}
5267SLAB_ATTR_RO(alloc_calls);
5268
5269static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
5270{
5271 if (!(s->flags & SLAB_STORE_USER))
5272 return -ENOSYS;
5273 return list_locations(s, buf, TRACK_FREE);
5274}
5275SLAB_ATTR_RO(free_calls);
5276#endif /* CONFIG_SLUB_DEBUG */
5277
5278#ifdef CONFIG_FAILSLAB
5279static ssize_t failslab_show(struct kmem_cache *s, char *buf)
5280{
5281 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
5282}
5283
5284static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
5285 size_t length)
5286{
c9e16131
CL
5287 if (s->refcount > 1)
5288 return -EINVAL;
5289
a5a84755
CL
5290 s->flags &= ~SLAB_FAILSLAB;
5291 if (buf[0] == '1')
5292 s->flags |= SLAB_FAILSLAB;
5293 return length;
5294}
5295SLAB_ATTR(failslab);
ab4d5ed5 5296#endif
53e15af0 5297
2086d26a
CL
5298static ssize_t shrink_show(struct kmem_cache *s, char *buf)
5299{
5300 return 0;
5301}
5302
5303static ssize_t shrink_store(struct kmem_cache *s,
5304 const char *buf, size_t length)
5305{
832f37f5
VD
5306 if (buf[0] == '1')
5307 kmem_cache_shrink(s);
5308 else
2086d26a
CL
5309 return -EINVAL;
5310 return length;
5311}
5312SLAB_ATTR(shrink);
5313
81819f0f 5314#ifdef CONFIG_NUMA
9824601e 5315static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
81819f0f 5316{
eb7235eb 5317 return sprintf(buf, "%u\n", s->remote_node_defrag_ratio / 10);
81819f0f
CL
5318}
5319
9824601e 5320static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
81819f0f
CL
5321 const char *buf, size_t length)
5322{
eb7235eb 5323 unsigned int ratio;
0121c619
CL
5324 int err;
5325
eb7235eb 5326 err = kstrtouint(buf, 10, &ratio);
0121c619
CL
5327 if (err)
5328 return err;
eb7235eb
AD
5329 if (ratio > 100)
5330 return -ERANGE;
0121c619 5331
eb7235eb 5332 s->remote_node_defrag_ratio = ratio * 10;
81819f0f 5333
81819f0f
CL
5334 return length;
5335}
9824601e 5336SLAB_ATTR(remote_node_defrag_ratio);
81819f0f
CL
5337#endif
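/*
 * Worked example for remote_node_defrag_ratio above (illustrative
 * value): writing "30" stores 300 internally (ratio * 10), and a later
 * read divides by 10 again and reports "30".  Values greater than 100
 * are rejected with -ERANGE.
 */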
5338
8ff12cfc 5339#ifdef CONFIG_SLUB_STATS
8ff12cfc
CL
5340static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5341{
5342 unsigned long sum = 0;
5343 int cpu;
5344 int len;
6da2ec56 5345 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
8ff12cfc
CL
5346
5347 if (!data)
5348 return -ENOMEM;
5349
5350 for_each_online_cpu(cpu) {
9dfc6e68 5351 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
8ff12cfc
CL
5352
5353 data[cpu] = x;
5354 sum += x;
5355 }
5356
5357 len = sprintf(buf, "%lu", sum);
5358
50ef37b9 5359#ifdef CONFIG_SMP
8ff12cfc
CL
5360 for_each_online_cpu(cpu) {
5361 if (data[cpu] && len < PAGE_SIZE - 20)
50ef37b9 5362 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
8ff12cfc 5363 }
50ef37b9 5364#endif
8ff12cfc
CL
5365 kfree(data);
5366 return len + sprintf(buf + len, "\n");
5367}
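/*
 * Example of the string built by show_stat() (illustrative counts):
 * the summed total comes first, followed on SMP builds by one
 * " C<cpu>=<count>" entry for every online CPU with a non-zero count,
 * e.g.
 *
 *	1284032 C0=401200 C2=882832
 *
 * Per-CPU entries are omitted once the output nears PAGE_SIZE.
 */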
5368
78eb00cc
DR
5369static void clear_stat(struct kmem_cache *s, enum stat_item si)
5370{
5371 int cpu;
5372
5373 for_each_online_cpu(cpu)
9dfc6e68 5374 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
78eb00cc
DR
5375}
5376
8ff12cfc
CL
5377#define STAT_ATTR(si, text) \
5378static ssize_t text##_show(struct kmem_cache *s, char *buf) \
5379{ \
5380 return show_stat(s, buf, si); \
5381} \
78eb00cc
DR
5382static ssize_t text##_store(struct kmem_cache *s, \
5383 const char *buf, size_t length) \
5384{ \
5385 if (buf[0] != '0') \
5386 return -EINVAL; \
5387 clear_stat(s, si); \
5388 return length; \
5389} \
5390SLAB_ATTR(text); \
8ff12cfc
CL
5391
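/*
 * For reference, STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath) from the
 * list below expands to:
 *
 *	static ssize_t alloc_fastpath_show(struct kmem_cache *s, char *buf)
 *	{
 *		return show_stat(s, buf, ALLOC_FASTPATH);
 *	}
 *	static ssize_t alloc_fastpath_store(struct kmem_cache *s,
 *				const char *buf, size_t length)
 *	{
 *		if (buf[0] != '0')
 *			return -EINVAL;
 *		clear_stat(s, ALLOC_FASTPATH);
 *		return length;
 *	}
 *	SLAB_ATTR(alloc_fastpath);
 *
 * i.e. reading the sysfs file dumps the counter and writing "0"
 * clears it on every online CPU.
 */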
5392STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5393STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5394STAT_ATTR(FREE_FASTPATH, free_fastpath);
5395STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5396STAT_ATTR(FREE_FROZEN, free_frozen);
5397STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5398STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5399STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5400STAT_ATTR(ALLOC_SLAB, alloc_slab);
5401STAT_ATTR(ALLOC_REFILL, alloc_refill);
e36a2652 5402STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
8ff12cfc
CL
5403STAT_ATTR(FREE_SLAB, free_slab);
5404STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5405STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5406STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5407STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5408STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5409STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
03e404af 5410STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
65c3376a 5411STAT_ATTR(ORDER_FALLBACK, order_fallback);
b789ef51
CL
5412STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5413STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
49e22585
CL
5414STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5415STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
8028dcea
AS
5416STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5417STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
8ff12cfc
CL
5418#endif
5419
06428780 5420static struct attribute *slab_attrs[] = {
81819f0f
CL
5421 &slab_size_attr.attr,
5422 &object_size_attr.attr,
5423 &objs_per_slab_attr.attr,
5424 &order_attr.attr,
73d342b1 5425 &min_partial_attr.attr,
49e22585 5426 &cpu_partial_attr.attr,
81819f0f 5427 &objects_attr.attr,
205ab99d 5428 &objects_partial_attr.attr,
81819f0f
CL
5429 &partial_attr.attr,
5430 &cpu_slabs_attr.attr,
5431 &ctor_attr.attr,
81819f0f
CL
5432 &aliases_attr.attr,
5433 &align_attr.attr,
81819f0f
CL
5434 &hwcache_align_attr.attr,
5435 &reclaim_account_attr.attr,
5436 &destroy_by_rcu_attr.attr,
a5a84755 5437 &shrink_attr.attr,
49e22585 5438 &slabs_cpu_partial_attr.attr,
ab4d5ed5 5439#ifdef CONFIG_SLUB_DEBUG
a5a84755
CL
5440 &total_objects_attr.attr,
5441 &slabs_attr.attr,
5442 &sanity_checks_attr.attr,
5443 &trace_attr.attr,
81819f0f
CL
5444 &red_zone_attr.attr,
5445 &poison_attr.attr,
5446 &store_user_attr.attr,
53e15af0 5447 &validate_attr.attr,
88a420e4
CL
5448 &alloc_calls_attr.attr,
5449 &free_calls_attr.attr,
ab4d5ed5 5450#endif
81819f0f
CL
5451#ifdef CONFIG_ZONE_DMA
5452 &cache_dma_attr.attr,
5453#endif
5454#ifdef CONFIG_NUMA
9824601e 5455 &remote_node_defrag_ratio_attr.attr,
8ff12cfc
CL
5456#endif
5457#ifdef CONFIG_SLUB_STATS
5458 &alloc_fastpath_attr.attr,
5459 &alloc_slowpath_attr.attr,
5460 &free_fastpath_attr.attr,
5461 &free_slowpath_attr.attr,
5462 &free_frozen_attr.attr,
5463 &free_add_partial_attr.attr,
5464 &free_remove_partial_attr.attr,
5465 &alloc_from_partial_attr.attr,
5466 &alloc_slab_attr.attr,
5467 &alloc_refill_attr.attr,
e36a2652 5468 &alloc_node_mismatch_attr.attr,
8ff12cfc
CL
5469 &free_slab_attr.attr,
5470 &cpuslab_flush_attr.attr,
5471 &deactivate_full_attr.attr,
5472 &deactivate_empty_attr.attr,
5473 &deactivate_to_head_attr.attr,
5474 &deactivate_to_tail_attr.attr,
5475 &deactivate_remote_frees_attr.attr,
03e404af 5476 &deactivate_bypass_attr.attr,
65c3376a 5477 &order_fallback_attr.attr,
b789ef51
CL
5478 &cmpxchg_double_fail_attr.attr,
5479 &cmpxchg_double_cpu_fail_attr.attr,
49e22585
CL
5480 &cpu_partial_alloc_attr.attr,
5481 &cpu_partial_free_attr.attr,
8028dcea
AS
5482 &cpu_partial_node_attr.attr,
5483 &cpu_partial_drain_attr.attr,
81819f0f 5484#endif
4c13dd3b
DM
5485#ifdef CONFIG_FAILSLAB
5486 &failslab_attr.attr,
5487#endif
8eb8284b 5488 &usersize_attr.attr,
4c13dd3b 5489
81819f0f
CL
5490 NULL
5491};
5492
1fdaaa23 5493static const struct attribute_group slab_attr_group = {
81819f0f
CL
5494 .attrs = slab_attrs,
5495};
5496
5497static ssize_t slab_attr_show(struct kobject *kobj,
5498 struct attribute *attr,
5499 char *buf)
5500{
5501 struct slab_attribute *attribute;
5502 struct kmem_cache *s;
5503 int err;
5504
5505 attribute = to_slab_attr(attr);
5506 s = to_slab(kobj);
5507
5508 if (!attribute->show)
5509 return -EIO;
5510
5511 err = attribute->show(s, buf);
5512
5513 return err;
5514}
5515
5516static ssize_t slab_attr_store(struct kobject *kobj,
5517 struct attribute *attr,
5518 const char *buf, size_t len)
5519{
5520 struct slab_attribute *attribute;
5521 struct kmem_cache *s;
5522 int err;
5523
5524 attribute = to_slab_attr(attr);
5525 s = to_slab(kobj);
5526
5527 if (!attribute->store)
5528 return -EIO;
5529
5530 err = attribute->store(s, buf, len);
127424c8 5531#ifdef CONFIG_MEMCG
107dab5c 5532 if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
426589f5 5533 struct kmem_cache *c;
81819f0f 5534
107dab5c
GC
5535 mutex_lock(&slab_mutex);
5536 if (s->max_attr_size < len)
5537 s->max_attr_size = len;
5538
ebe945c2
GC
5539 /*
5540	 * This is a best-effort propagation, so this function's return
5541	 * value is determined by the parent cache only. This is
5542	 * basically because not all attributes have well defined
5543	 * semantics for rollbacks - most of the actions will
5544 * have permanent effects.
5545 *
5546	 * Returning the error value of any of the children that fail
5547	 * is not 100% well defined, in the sense that users seeing the
5548	 * error code would not be able to tell anything about the state
5549	 * of the cache.
5550	 *
5551	 * Only returning the error code for the parent cache at least
5552	 * has well defined semantics. The cache being written to
5553	 * directly either failed or succeeded; if it succeeded, we loop
5554	 * through the descendants with best-effort propagation.
5555 */
426589f5
VD
5556 for_each_memcg_cache(c, s)
5557 attribute->store(c, buf, len);
107dab5c
GC
5558 mutex_unlock(&slab_mutex);
5559 }
5560#endif
81819f0f
CL
5561 return err;
5562}
5563
107dab5c
GC
5564static void memcg_propagate_slab_attrs(struct kmem_cache *s)
5565{
127424c8 5566#ifdef CONFIG_MEMCG
107dab5c
GC
5567 int i;
5568 char *buffer = NULL;
93030d83 5569 struct kmem_cache *root_cache;
107dab5c 5570
93030d83 5571 if (is_root_cache(s))
107dab5c
GC
5572 return;
5573
f7ce3190 5574 root_cache = s->memcg_params.root_cache;
93030d83 5575
107dab5c
GC
5576 /*
5577	 * This means this cache had no attribute written. Therefore, no point
5578	 * in copying default values around.
5579 */
93030d83 5580 if (!root_cache->max_attr_size)
107dab5c
GC
5581 return;
5582
5583 for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
5584 char mbuf[64];
5585 char *buf;
5586 struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
478fe303 5587 ssize_t len;
107dab5c
GC
5588
5589 if (!attr || !attr->store || !attr->show)
5590 continue;
5591
5592 /*
5593 * It is really bad that we have to allocate here, so we will
5594 * do it only as a fallback. If we actually allocate, though,
5595 * we can just use the allocated buffer until the end.
5596 *
5597 * Most of the slub attributes will tend to be very small in
5598	 * size, but sysfs allows buffers up to a page, so page-sized
5599	 * values can theoretically happen.
5600 */
5601 if (buffer)
5602 buf = buffer;
93030d83 5603 else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
107dab5c
GC
5604 buf = mbuf;
5605 else {
5606 buffer = (char *) get_zeroed_page(GFP_KERNEL);
5607 if (WARN_ON(!buffer))
5608 continue;
5609 buf = buffer;
5610 }
5611
478fe303
TG
5612 len = attr->show(root_cache, buf);
5613 if (len > 0)
5614 attr->store(s, buf, len);
107dab5c
GC
5615 }
5616
5617 if (buffer)
5618 free_page((unsigned long)buffer);
5619#endif
5620}
5621
41a21285
CL
5622static void kmem_cache_release(struct kobject *k)
5623{
5624 slab_kmem_cache_release(to_slab(k));
5625}
5626
52cf25d0 5627static const struct sysfs_ops slab_sysfs_ops = {
81819f0f
CL
5628 .show = slab_attr_show,
5629 .store = slab_attr_store,
5630};
5631
5632static struct kobj_type slab_ktype = {
5633 .sysfs_ops = &slab_sysfs_ops,
41a21285 5634 .release = kmem_cache_release,
81819f0f
CL
5635};
5636
5637static int uevent_filter(struct kset *kset, struct kobject *kobj)
5638{
5639 struct kobj_type *ktype = get_ktype(kobj);
5640
5641 if (ktype == &slab_ktype)
5642 return 1;
5643 return 0;
5644}
5645
9cd43611 5646static const struct kset_uevent_ops slab_uevent_ops = {
81819f0f
CL
5647 .filter = uevent_filter,
5648};
5649
27c3a314 5650static struct kset *slab_kset;
81819f0f 5651
9a41707b
VD
5652static inline struct kset *cache_kset(struct kmem_cache *s)
5653{
127424c8 5654#ifdef CONFIG_MEMCG
9a41707b 5655 if (!is_root_cache(s))
f7ce3190 5656 return s->memcg_params.root_cache->memcg_kset;
9a41707b
VD
5657#endif
5658 return slab_kset;
5659}
5660
81819f0f
CL
5661#define ID_STR_LENGTH 64
5662
5663/* Create a unique string id for a slab cache:
6446faa2
CL
5664 *
5665	 * Format: ":[flags-]size"
81819f0f
CL
5666 */
5667static char *create_unique_id(struct kmem_cache *s)
5668{
5669 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5670 char *p = name;
5671
5672 BUG_ON(!name);
5673
5674 *p++ = ':';
5675 /*
5676	 * First come the flags affecting slabcache operations. We will only
5677 * get here for aliasable slabs so we do not need to support
5678 * too many flags. The flags here must cover all flags that
5679 * are matched during merging to guarantee that the id is
5680 * unique.
5681 */
5682 if (s->flags & SLAB_CACHE_DMA)
5683 *p++ = 'd';
5684 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5685 *p++ = 'a';
becfda68 5686 if (s->flags & SLAB_CONSISTENCY_CHECKS)
81819f0f 5687 *p++ = 'F';
230e9fc2
VD
5688 if (s->flags & SLAB_ACCOUNT)
5689 *p++ = 'A';
81819f0f
CL
5690 if (p != name + 1)
5691 *p++ = '-';
44065b2e 5692 p += sprintf(p, "%07u", s->size);
2633d7a0 5693
81819f0f
CL
5694 BUG_ON(p > name + ID_STR_LENGTH - 1);
5695 return name;
5696}
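/*
 * Illustrative ids produced by create_unique_id() (example sizes, not
 * taken from a particular cache): a mergeable 192-byte cache with none
 * of the flags above yields ":0000192"; the same cache with
 * SLAB_CACHE_DMA and SLAB_RECLAIM_ACCOUNT yields ":da-0000192".  The
 * '-' separator only appears when at least one flag character was
 * emitted.
 */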
5697
3b7b3140
TH
5698static void sysfs_slab_remove_workfn(struct work_struct *work)
5699{
5700 struct kmem_cache *s =
5701 container_of(work, struct kmem_cache, kobj_remove_work);
5702
5703 if (!s->kobj.state_in_sysfs)
5704 /*
5705 * For a memcg cache, this may be called during
5706 * deactivation and again on shutdown. Remove only once.
5707 * A cache is never shut down before deactivation is
5708 * complete, so no need to worry about synchronization.
5709 */
f6ba4880 5710 goto out;
3b7b3140
TH
5711
5712#ifdef CONFIG_MEMCG
5713 kset_unregister(s->memcg_kset);
5714#endif
5715 kobject_uevent(&s->kobj, KOBJ_REMOVE);
f6ba4880 5716out:
3b7b3140
TH
5717 kobject_put(&s->kobj);
5718}
5719
81819f0f
CL
5720static int sysfs_slab_add(struct kmem_cache *s)
5721{
5722 int err;
5723 const char *name;
1663f26d 5724 struct kset *kset = cache_kset(s);
45530c44 5725 int unmergeable = slab_unmergeable(s);
81819f0f 5726
3b7b3140
TH
5727 INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
5728
1663f26d
TH
5729 if (!kset) {
5730 kobject_init(&s->kobj, &slab_ktype);
5731 return 0;
5732 }
5733
11066386
MC
5734 if (!unmergeable && disable_higher_order_debug &&
5735 (slub_debug & DEBUG_METADATA_FLAGS))
5736 unmergeable = 1;
5737
81819f0f
CL
5738 if (unmergeable) {
5739 /*
5740 * Slabcache can never be merged so we can use the name proper.
5741 * This is typically the case for debug situations. In that
5742 * case we can catch duplicate names easily.
5743 */
27c3a314 5744 sysfs_remove_link(&slab_kset->kobj, s->name);
81819f0f
CL
5745 name = s->name;
5746 } else {
5747 /*
5748 * Create a unique name for the slab as a target
5749 * for the symlinks.
5750 */
5751 name = create_unique_id(s);
5752 }
5753
1663f26d 5754 s->kobj.kset = kset;
26e4f205 5755 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
54b6a731 5756 if (err)
80da026a 5757 goto out;
81819f0f
CL
5758
5759 err = sysfs_create_group(&s->kobj, &slab_attr_group);
54b6a731
DJ
5760 if (err)
5761 goto out_del_kobj;
9a41707b 5762
127424c8 5763#ifdef CONFIG_MEMCG
1663f26d 5764 if (is_root_cache(s) && memcg_sysfs_enabled) {
9a41707b
VD
5765 s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
5766 if (!s->memcg_kset) {
54b6a731
DJ
5767 err = -ENOMEM;
5768 goto out_del_kobj;
9a41707b
VD
5769 }
5770 }
5771#endif
5772
81819f0f
CL
5773 kobject_uevent(&s->kobj, KOBJ_ADD);
5774 if (!unmergeable) {
5775 /* Setup first alias */
5776 sysfs_slab_alias(s, s->name);
81819f0f 5777 }
54b6a731
DJ
5778out:
5779 if (!unmergeable)
5780 kfree(name);
5781 return err;
5782out_del_kobj:
5783 kobject_del(&s->kobj);
54b6a731 5784 goto out;
81819f0f
CL
5785}
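/*
 * Illustrative sysfs layout resulting from the registration above
 * (cache names and sizes are examples): a mergeable cache is added
 * under its unique id, e.g. /sys/kernel/slab/:a-0000192, and
 * sysfs_slab_alias() then creates a symlink for each aliased cache
 * name pointing at that directory; an unmergeable (typically debug)
 * cache is registered directly under its own name.
 */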
5786
bf5eb3de 5787static void sysfs_slab_remove(struct kmem_cache *s)
81819f0f 5788{
97d06609 5789 if (slab_state < FULL)
2bce6485
CL
5790 /*
5791 * Sysfs has not been setup yet so no need to remove the
5792 * cache from sysfs.
5793 */
5794 return;
5795
3b7b3140
TH
5796 kobject_get(&s->kobj);
5797 schedule_work(&s->kobj_remove_work);
bf5eb3de
TH
5798}
5799
d50d82fa
MP
5800void sysfs_slab_unlink(struct kmem_cache *s)
5801{
5802 if (slab_state >= FULL)
5803 kobject_del(&s->kobj);
5804}
5805
bf5eb3de
TH
5806void sysfs_slab_release(struct kmem_cache *s)
5807{
5808 if (slab_state >= FULL)
5809 kobject_put(&s->kobj);
81819f0f
CL
5810}
5811
5812/*
5813 * Need to buffer aliases during bootup until sysfs becomes
9f6c708e 5814 * available lest we lose that information.
81819f0f
CL
5815 */
5816struct saved_alias {
5817 struct kmem_cache *s;
5818 const char *name;
5819 struct saved_alias *next;
5820};
5821
5af328a5 5822static struct saved_alias *alias_list;
81819f0f
CL
5823
5824static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5825{
5826 struct saved_alias *al;
5827
97d06609 5828 if (slab_state == FULL) {
81819f0f
CL
5829 /*
5830 * If we have a leftover link then remove it.
5831 */
27c3a314
GKH
5832 sysfs_remove_link(&slab_kset->kobj, name);
5833 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
81819f0f
CL
5834 }
5835
5836 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5837 if (!al)
5838 return -ENOMEM;
5839
5840 al->s = s;
5841 al->name = name;
5842 al->next = alias_list;
5843 alias_list = al;
5844 return 0;
5845}
5846
5847static int __init slab_sysfs_init(void)
5848{
5b95a4ac 5849 struct kmem_cache *s;
81819f0f
CL
5850 int err;
5851
18004c5d 5852 mutex_lock(&slab_mutex);
2bce6485 5853
0ff21e46 5854 slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
27c3a314 5855 if (!slab_kset) {
18004c5d 5856 mutex_unlock(&slab_mutex);
f9f58285 5857 pr_err("Cannot register slab subsystem.\n");
81819f0f
CL
5858 return -ENOSYS;
5859 }
5860
97d06609 5861 slab_state = FULL;
26a7bd03 5862
5b95a4ac 5863 list_for_each_entry(s, &slab_caches, list) {
26a7bd03 5864 err = sysfs_slab_add(s);
5d540fb7 5865 if (err)
f9f58285
FF
5866 pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
5867 s->name);
26a7bd03 5868 }
81819f0f
CL
5869
5870 while (alias_list) {
5871 struct saved_alias *al = alias_list;
5872
5873 alias_list = alias_list->next;
5874 err = sysfs_slab_alias(al->s, al->name);
5d540fb7 5875 if (err)
f9f58285
FF
5876 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
5877 al->name);
81819f0f
CL
5878 kfree(al);
5879 }
5880
18004c5d 5881 mutex_unlock(&slab_mutex);
81819f0f
CL
5882 resiliency_test();
5883 return 0;
5884}
5885
5886__initcall(slab_sysfs_init);
ab4d5ed5 5887#endif /* CONFIG_SYSFS */
57ed3eda
PE
5888
5889/*
5890 * The /proc/slabinfo ABI
5891 */
5b365771 5892#ifdef CONFIG_SLUB_DEBUG
0d7561c6 5893void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
57ed3eda 5894{
57ed3eda 5895 unsigned long nr_slabs = 0;
205ab99d
CL
5896 unsigned long nr_objs = 0;
5897 unsigned long nr_free = 0;
57ed3eda 5898 int node;
fa45dc25 5899 struct kmem_cache_node *n;
57ed3eda 5900
fa45dc25 5901 for_each_kmem_cache_node(s, node, n) {
c17fd13e
WL
5902 nr_slabs += node_nr_slabs(n);
5903 nr_objs += node_nr_objs(n);
205ab99d 5904 nr_free += count_partial(n, count_free);
57ed3eda
PE
5905 }
5906
0d7561c6
GC
5907 sinfo->active_objs = nr_objs - nr_free;
5908 sinfo->num_objs = nr_objs;
5909 sinfo->active_slabs = nr_slabs;
5910 sinfo->num_slabs = nr_slabs;
5911 sinfo->objects_per_slab = oo_objects(s->oo);
5912 sinfo->cache_order = oo_order(s->oo);
57ed3eda
PE
5913}
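/*
 * Worked example for the accounting above (made-up numbers): with
 * nr_objs = 1000 and nr_free = 40 objects sitting unused on partial
 * slabs, /proc/slabinfo reports active_objs = 960 and num_objs = 1000;
 * active_slabs and num_slabs are both reported as nr_slabs since SLUB
 * does not track a separate active-slab count.
 */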
5914
0d7561c6 5915void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
7b3c3a50 5916{
7b3c3a50
AD
5917}
5918
b7454ad3
GC
5919ssize_t slabinfo_write(struct file *file, const char __user *buffer,
5920 size_t count, loff_t *ppos)
7b3c3a50 5921{
b7454ad3 5922 return -EIO;
7b3c3a50 5923}
5b365771 5924#endif /* CONFIG_SLUB_DEBUG */