// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>

#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects the second
 *   double word in the page struct. Meaning:
 *	A. page->freelist	-> List of free objects in a page
 *	B. page->counters	-> Counters of objects
 *	C. page->frozen		-> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list. The processor that froze the slab is the one who can
 *   perform list operations on the page. Other processors may put objects
 *   onto the freelist but the processor that froze the slab is the only
 *   one that can retrieve the objects from the page's freelist.
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists nor may the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *   Interrupts are disabled during allocation and deallocation in order to
 *   make the slab allocator safe to use in the context of an irq. In addition
 *   interrupts are disabled to ensure that the processor does not change
 *   while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocators per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is frozen and exempt from list processing.
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
 * 			freelist that allows lockless access to
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 * 			options set. This moves slab handling out of
 * 			the fast path and disables lockless freelists.
 */

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
	return 0;
#endif
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}
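
/*
 * Note: kmem_cache_has_cpu_partial() returns false for caches with debug
 * flags set, so debugged objects stay on the regular per-node lists where
 * the consistency checks can run under the node's list_lock.
 */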

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
/* Use cmpxchg_double */
#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void memcg_propagate_slab_attrs(struct kmem_cache *s);
static void sysfs_slab_remove(struct kmem_cache *s);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
static inline void sysfs_slab_remove(struct kmem_cache *s) { }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}
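
/*
 * For illustration: with CONFIG_SLUB_STATS the fast paths bump counters
 * such as stat(s, ALLOC_FASTPATH); the per-cpu values are summed when the
 * corresponding file under /sys/kernel/slab/<cache>/ is read.
 */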

/********************************************************************
 * 			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
#else
	return ptr;
#endif
}

/* Returns the freelist pointer recorded at location ptr_addr. */
static inline void *freelist_dereference(const struct kmem_cache *s,
					 void *ptr_addr)
{
	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
			    (unsigned long)ptr_addr);
}
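
/*
 * The obfuscation is its own inverse: XOR-ing twice with the same
 * per-cache random value and slot address recovers the original pointer,
 * i.e.
 *
 *	freelist_ptr(s, freelist_ptr(s, fp, addr), addr) == fp
 *
 * which is why the same helper is used both when storing and when
 * loading a freelist pointer.
 */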

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return freelist_dereference(s, object + s->offset);
}

static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	if (object)
		prefetch(freelist_dereference(s, object + s->offset));
}

static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	void *p;

	if (!debug_pagealloc_enabled())
		return get_freepointer(s, object);

	freepointer_addr = (unsigned long)object + s->offset;
	probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
	return freelist_ptr(s, p, freepointer_addr);
}
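
/*
 * get_freepointer_safe() goes through probe_kernel_read() because, when
 * debug_pagealloc is enabled, a speculative freepointer read may touch an
 * object whose page has already been unmapped; the probing read tolerates
 * the resulting fault where a plain dereference would not.
 */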

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}
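
/*
 * The BUG_ON() above only catches the simplest double-free pattern, where
 * the same object is freed twice in a row and would end up pointing at
 * itself; freelist cycles longer than one object are not detected here.
 */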

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr), __idx = 1; \
		__idx <= __objects; \
		__p += (__s)->size, __idx++)

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
	return (p - addr) / s->size;
}
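
/*
 * Example: for a cache with s->size == 256, an object located at
 * page_address(page) + 512 has slab_index() == 2.
 */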

static inline int order_objects(int order, unsigned long size, int reserved)
{
	return ((PAGE_SIZE << order) - reserved) / size;
}

static inline struct kmem_cache_order_objects oo_make(int order,
		unsigned long size, int reserved)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size, reserved)
	};

	return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
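
/*
 * Example: an order-3 slab (32768 bytes) holding 256-byte objects with no
 * reserved space packs 128 objects; oo_make(3, 256, 0) encodes this as
 * (3 << OO_SHIFT) + 128, which oo_order()/oo_objects() decode again.
 */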

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	__bit_spin_unlock(PG_locked, &page->flags);
}

static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
{
	struct page tmp;
	tmp.counters = counters_new;
	/*
	 * page->counters can cover frozen/inuse/objects as well
	 * as page->_refcount.  If we assign to ->counters directly
	 * we run the risk of losing updates to page->_refcount, so
	 * be careful and only assign to the fields we need.
	 */
	page->frozen  = tmp.frozen;
	page->inuse   = tmp.inuse;
	page->objects = tmp.objects;
}

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			set_page_slub_counters(page, counters_new);
			slab_unlock(page);
			return true;
		}
		slab_unlock(page);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		unsigned long flags;

		local_irq_save(flags);
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			set_page_slub_counters(page, counters_new);
			slab_unlock(page);
			local_irq_restore(flags);
			return true;
		}
		slab_unlock(page);
		local_irq_restore(flags);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

#ifdef CONFIG_SLUB_DEBUG
/*
 * Determine a map of objects in use on a page.
 *
 * Node listlock must be held to guarantee that the page does
 * not vanish from under us.
 */
static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
{
	void *p;
	void *addr = page_address(page);

	for (p = page->freelist; p; p = get_freepointer(s, p))
		set_bit(slab_index(p, s, addr), map);
}
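
/*
 * Note that the bits set in the map correspond to objects currently on the
 * page's freelist; callers treat the remaining zero bits as the objects
 * that are in use.
 */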

static inline int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}
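
/*
 * size_from_object() and restore_red_left() undo what fixup_red_left()
 * does above: with SLAB_RED_ZONE the externally visible object pointer is
 * advanced past the left red zone, so the metadata checks first step back
 * by s->red_left_pad to reach the true start of the slot.
 */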

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;
#endif

static char *slub_debug_slabs;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata.  This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error.  metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kasan_enable_current();
}

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, void *object)
{
	void *base;

	if (!object)
		return 1;

	base = page_address(page);
	object = restore_red_left(s, object);
	if (object < base || object >= base + page->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
			length, 1);
	metadata_access_disable();
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	return p + alloc;
}

static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
{
	struct track *p = get_track(s, object, alloc);

	if (addr) {
#ifdef CONFIG_STACKTRACE
		struct stack_trace trace;
		int i;

		trace.nr_entries = 0;
		trace.max_entries = TRACK_ADDRS_COUNT;
		trace.entries = p->addrs;
		trace.skip = 3;
		metadata_access_enable();
		save_stack_trace(&trace);
		metadata_access_disable();

		/* See rant in lockdep.c */
		if (trace.nr_entries != 0 &&
		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
			trace.nr_entries--;

		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
			p->addrs[i] = 0;
#endif
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else
		memset(p, 0, sizeof(struct track));
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, 0UL);
	set_track(s, object, TRACK_ALLOC, 0UL);
}

static void print_track(const char *s, struct track *t)
{
	if (!t->addr)
		return;

	pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
	{
		int i;
		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
			if (t->addrs[i])
				pr_err("\t%pS\n", (void *)t->addrs[i]);
			else
				break;
	}
#endif
}

static void print_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
	print_track("Freed", get_track(s, object, TRACK_FREE));
}

static void print_page_info(struct page *page)
{
	pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
	       page, page->objects, page->inuse, page->freelist, page->flags);

}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");

	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	va_end(args);
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR, "Object ", p,
		      min_t(unsigned long, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p + s->object_size,
			s->inuse - s->object_size);

	if (s->offset)
		off = s->offset + sizeof(void *);
	else
		off = s->inuse;

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding ", p + off,
			      size_from_object(s) - off);

	dump_stack();
}

void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
}

static void slab_err(struct kmem_cache *s, struct page *page,
			const char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = object;

	if (s->flags & SLAB_RED_ZONE)
		memset(p - s->red_left_pad, val, s->red_left_pad);

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->object_size - 1);
		p[s->object_size - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->object_size, val, s->inuse - s->object_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;

	metadata_access_enable();
	fault = memchr_inv(start, value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	slab_bug(s, "%s overwritten", what);
	pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 * 	pointer is the first word of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */

static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = s->inuse;	/* The end of info */

	if (s->offset)
		/* Freepointer is placed after the object. */
		off += sizeof(void *);

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (size_from_object(s) == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	u8 *pad;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return 1;

	pad = end - remainder;
	metadata_access_enable();
	fault = memchr_inv(pad, POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
	print_section(KERN_ERR, "Padding ", pad, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, page, object, "Redzone",
			object - s->red_left_pad, val, s->red_left_pad))
			return 0;

		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, val, s->inuse - s->object_size))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->object_size - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
				p + s->object_size - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!s->offset && val == SLUB_RED_ACTIVE)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(compound_order(page), s->size, s->reserved);
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	fp = page->freelist;
	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = order_objects(compound_order(page), s->size, s->reserved);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
			 page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted.");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
			 page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&page->lru, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&page->lru);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (likely(n)) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
								void *object)
{
	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}

static inline int alloc_consistency_checks(struct kmem_cache *s,
					struct page *page,
					void *object, unsigned long addr)
{
	if (!check_slab(s, page))
		return 0;

	if (!check_valid_pointer(s, page, object)) {
		object_err(s, page, object, "Freelist Pointer check fails");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
		return 0;

	return 1;
}

static noinline int alloc_debug_processing(struct kmem_cache *s,
					struct page *page,
					void *object, unsigned long addr)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!alloc_consistency_checks(s, page, object, addr))
			goto bad;
	}

	/* Success: perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, SLUB_RED_ACTIVE);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
		 * If this is a slab page then let's do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = page->objects;
		page->freelist = NULL;
	}
	return 0;
}

static inline int free_consistency_checks(struct kmem_cache *s,
		struct page *page, void *object, unsigned long addr)
{
	if (!check_valid_pointer(s, page, object)) {
		slab_err(s, page, "Invalid object pointer 0x%p", object);
		return 0;
	}

	if (on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already free");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
		return 0;

	if (unlikely(s != page->slab_cache)) {
		if (!PageSlab(page)) {
			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
				 object);
		} else if (!page->slab_cache) {
			pr_err("SLUB <none>: no slab for object 0x%p.\n",
			       object);
			dump_stack();
		} else
			object_err(s, page, object,
					"page slab pointer corrupt.");
		return 0;
	}
	return 1;
}

/* Supports checking bulk free of a constructed freelist */
static noinline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
	void *object = head;
	int cnt = 0;
	unsigned long uninitialized_var(flags);
	int ret = 0;

	spin_lock_irqsave(&n->list_lock, flags);
	slab_lock(page);

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!check_slab(s, page))
			goto out;
	}

next_object:
	cnt++;

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!free_consistency_checks(s, page, object, addr))
			goto out;
	}

	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
	init_object(s, object, SLUB_RED_INACTIVE);

	/* Reached end of constructed freelist yet? */
	if (object != tail) {
		object = get_freepointer(s, object);
		goto next_object;
	}
	ret = 1;

out:
	if (cnt != bulk_cnt)
		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
			 bulk_cnt, cnt);

	slab_unlock(page);
	spin_unlock_irqrestore(&n->list_lock, flags);
	if (!ret)
		slab_fix(s, "Object at 0x%p not freed", object);
	return ret;
}

static int __init setup_slub_debug(char *str)
{
	slub_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	if (*str == ',')
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		goto check_slabs;

	slub_debug = 0;
	if (*str == '-')
		/*
		 * Switch off all debugging measures.
		 */
		goto out;

	/*
	 * Determine which debug features should be switched on
	 */
	for (; *str && *str != ','; str++) {
		switch (tolower(*str)) {
		case 'f':
			slub_debug |= SLAB_CONSISTENCY_CHECKS;
			break;
		case 'z':
			slub_debug |= SLAB_RED_ZONE;
			break;
		case 'p':
			slub_debug |= SLAB_POISON;
			break;
		case 'u':
			slub_debug |= SLAB_STORE_USER;
			break;
		case 't':
			slub_debug |= SLAB_TRACE;
			break;
		case 'a':
			slub_debug |= SLAB_FAILSLAB;
			break;
		case 'o':
			/*
			 * Avoid enabling debugging on caches if its minimum
			 * order would increase as a result.
			 */
			disable_higher_order_debug = 1;
			break;
		default:
			pr_err("slub_debug option '%c' unknown. skipped\n",
			       *str);
		}
	}

check_slabs:
	if (*str == ',')
		slub_debug_slabs = str + 1;
out:
	return 1;
}

__setup("slub_debug", setup_slub_debug);
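
/*
 * Example kernel command lines (a sketch of the option syntax parsed
 * above, not an exhaustive list):
 *
 *	slub_debug		enable full debugging for every cache
 *	slub_debug=FZP		consistency checks, red zoning and poisoning
 *	slub_debug=,dentry	full debugging, but only for the dentry cache
 *	slub_debug=O		skip debugging on caches whose minimum order
 *				would otherwise increase
 */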

slab_flags_t kmem_cache_flags(unsigned long object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	/*
	 * Enable debugging if selected on the kernel commandline.
	 */
	if (slub_debug && (!slub_debug_slabs || (name &&
		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
		flags |= slub_debug;

	return flags;
}
#else /* !CONFIG_SLUB_DEBUG */
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
			{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
slab_flags_t kmem_cache_flags(unsigned long object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
							{ return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
							{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}

#endif /* CONFIG_SLUB_DEBUG */

/*
 * Hooks for other subsystems that check memory allocations. In a typical
 * production configuration these hooks all should produce no code at all.
 */
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
	kmemleak_alloc(ptr, size, 1, flags);
	kasan_kmalloc_large(ptr, size, flags);
}

static __always_inline void kfree_hook(void *x)
{
	kmemleak_free(x);
	kasan_kfree_large(x, _RET_IP_);
}

static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
{
	void *freeptr;

	kmemleak_free_recursive(x, s->flags);

	/*
	 * Trouble is that we may no longer disable interrupts in the fast
	 * path. So in order to make the debug calls that expect irqs to be
	 * disabled we need to disable interrupts temporarily.
	 */
#ifdef CONFIG_LOCKDEP
	{
		unsigned long flags;

		local_irq_save(flags);
		debug_check_no_locks_freed(x, s->object_size);
		local_irq_restore(flags);
	}
#endif
	if (!(s->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(x, s->object_size);

	freeptr = get_freepointer(s, x);
	/*
	 * kasan_slab_free() may put x into memory quarantine, delaying its
	 * reuse. In this case the object's freelist pointer is changed.
	 */
	kasan_slab_free(s, x, _RET_IP_);
	return freeptr;
}

static inline void slab_free_freelist_hook(struct kmem_cache *s,
					   void *head, void *tail)
{
/*
 * Compiler cannot detect this function can be removed if slab_free_hook()
 * evaluates to nothing. Thus, catch all relevant config debug options here.
 */
#if defined(CONFIG_LOCKDEP)	||		\
	defined(CONFIG_DEBUG_KMEMLEAK) ||	\
	defined(CONFIG_DEBUG_OBJECTS_FREE) ||	\
	defined(CONFIG_KASAN)

	void *object = head;
	void *tail_obj = tail ? : head;
	void *freeptr;

	do {
		freeptr = slab_free_hook(s, object);
	} while ((object != tail_obj) && (object = freeptr));
#endif
}

static void setup_object(struct kmem_cache *s, struct page *page,
				void *object)
{
	setup_object_debug(s, page, object);
	kasan_init_slab_obj(s, object);
	if (unlikely(s->ctor)) {
		kasan_unpoison_object_data(s, object);
		s->ctor(object);
		kasan_poison_object_data(s, object);
	}
}

/*
 * Slab allocation and freeing
 */
static inline struct page *alloc_slab_page(struct kmem_cache *s,
		gfp_t flags, int node, struct kmem_cache_order_objects oo)
{
	struct page *page;
	int order = oo_order(oo);

	if (node == NUMA_NO_NODE)
		page = alloc_pages(flags, order);
	else
		page = __alloc_pages_node(node, flags, order);

	if (page && memcg_charge_slab(page, flags, order, s)) {
		__free_pages(page, order);
		page = NULL;
	}

	return page;
}

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Pre-initialize the random sequence cache */
static int init_cache_random_seq(struct kmem_cache *s)
{
	int err;
	unsigned long i, count = oo_objects(s->oo);

	/* Bailout if already initialised */
	if (s->random_seq)
		return 0;

	err = cache_random_seq_create(s, count, GFP_KERNEL);
	if (err) {
		pr_err("SLUB: Unable to initialize free list for %s\n",
			s->name);
		return err;
	}

	/* Transform to an offset on the set of pages */
	if (s->random_seq) {
		for (i = 0; i < count; i++)
			s->random_seq[i] *= s->size;
	}
	return 0;
}

/* Initialize each random sequence freelist per cache */
static void __init init_freelist_randomization(void)
{
	struct kmem_cache *s;

	mutex_lock(&slab_mutex);

	list_for_each_entry(s, &slab_caches, list)
		init_cache_random_seq(s);

	mutex_unlock(&slab_mutex);
}

/* Get the next entry on the pre-computed freelist randomized */
static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
				unsigned long *pos, void *start,
				unsigned long page_limit,
				unsigned long freelist_count)
{
	unsigned int idx;

	/*
	 * If the target page allocation failed, the number of objects on the
	 * page might be smaller than the usual size defined by the cache.
	 */
	do {
		idx = s->random_seq[*pos];
		*pos += 1;
		if (*pos >= freelist_count)
			*pos = 0;
	} while (unlikely(idx >= page_limit));

	return (char *)start + idx;
}

/* Shuffle the single linked freelist based on a random pre-computed sequence */
static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
{
	void *start;
	void *cur;
	void *next;
	unsigned long idx, pos, page_limit, freelist_count;

	if (page->objects < 2 || !s->random_seq)
		return false;

	freelist_count = oo_objects(s->oo);
	pos = get_random_int() % freelist_count;

	page_limit = page->objects * s->size;
	start = fixup_red_left(s, page_address(page));

	/* First entry is used as the base of the freelist */
	cur = next_freelist_entry(s, page, &pos, start, page_limit,
				freelist_count);
	page->freelist = cur;

	for (idx = 1; idx < page->objects; idx++) {
		setup_object(s, page, cur);
		next = next_freelist_entry(s, page, &pos, start, page_limit,
			freelist_count);
		set_freepointer(s, cur, next);
		cur = next;
	}
	setup_object(s, page, cur);
	set_freepointer(s, cur, NULL);

	return true;
}
#else
static inline int init_cache_random_seq(struct kmem_cache *s)
{
	return 0;
}
static inline void init_freelist_randomization(void) { }
static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
{
	return false;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	struct kmem_cache_order_objects oo = s->oo;
	gfp_t alloc_gfp;
	void *start, *p;
	int idx, order;
	bool shuffle;

	flags &= gfp_allowed_mask;

	if (gfpflags_allow_blocking(flags))
		local_irq_enable();

	flags |= s->allocflags;

	/*
	 * Let the initial higher-order allocation fail under memory pressure
	 * so we fall-back to the minimum order allocation.
	 */
	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);

	page = alloc_slab_page(s, alloc_gfp, node, oo);
	if (unlikely(!page)) {
		oo = s->min;
		alloc_gfp = flags;
		/*
		 * Allocation may have failed due to fragmentation.
		 * Try a lower order alloc if possible
		 */
		page = alloc_slab_page(s, alloc_gfp, node, oo);
		if (unlikely(!page))
			goto out;
		stat(s, ORDER_FALLBACK);
	}

	page->objects = oo_objects(oo);

	order = compound_order(page);
	page->slab_cache = s;
	__SetPageSlab(page);
	if (page_is_pfmemalloc(page))
		SetPageSlabPfmemalloc(page);

	start = page_address(page);

	if (unlikely(s->flags & SLAB_POISON))
		memset(start, POISON_INUSE, PAGE_SIZE << order);

	kasan_poison_slab(page);

	shuffle = shuffle_freelist(s, page);

	if (!shuffle) {
		for_each_object_idx(p, idx, s, start, page->objects) {
			setup_object(s, page, p);
			if (likely(idx < page->objects))
				set_freepointer(s, p, p + s->size);
			else
				set_freepointer(s, p, NULL);
		}
		page->freelist = fixup_red_left(s, start);
	}

	page->inuse = page->objects;
	page->frozen = 1;

out:
	if (gfpflags_allow_blocking(flags))
		local_irq_disable();
	if (!page)
		return NULL;

	mod_lruvec_page_state(page,
		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		1 << oo_order(oo));

	inc_slabs_node(s, page_to_nid(page), page->objects);

	return page;
}

static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
		flags &= ~GFP_SLAB_BUG_MASK;
		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
				invalid_mask, &invalid_mask, flags, &flags);
		dump_stack();
	}

	return allocate_slab(s,
		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
}
1658
81819f0f
CL
1659static void __free_slab(struct kmem_cache *s, struct page *page)
1660{
834f3d11
CL
1661 int order = compound_order(page);
1662 int pages = 1 << order;
81819f0f 1663
becfda68 1664 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
81819f0f
CL
1665 void *p;
1666
1667 slab_pad_check(s, page);
224a88be
CL
1668 for_each_object(p, s, page_address(page),
1669 page->objects)
f7cb1933 1670 check_object(s, page, p, SLUB_RED_INACTIVE);
81819f0f
CL
1671 }
1672
7779f212 1673 mod_lruvec_page_state(page,
81819f0f
CL
1674 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1675 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
06428780 1676 -pages);
81819f0f 1677
072bb0aa 1678 __ClearPageSlabPfmemalloc(page);
49bd5221 1679 __ClearPageSlab(page);
1f458cbf 1680
22b751c3 1681 page_mapcount_reset(page);
1eb5ac64
NP
1682 if (current->reclaim_state)
1683 current->reclaim_state->reclaimed_slab += pages;
27ee57c9
VD
1684 memcg_uncharge_slab(page, order, s);
1685 __free_pages(page, order);
81819f0f
CL
1686}
1687
da9a638c
LJ
1688#define need_reserve_slab_rcu \
1689 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1690
81819f0f
CL
1691static void rcu_free_slab(struct rcu_head *h)
1692{
1693 struct page *page;
1694
da9a638c
LJ
1695 if (need_reserve_slab_rcu)
1696 page = virt_to_head_page(h);
1697 else
1698 page = container_of((struct list_head *)h, struct page, lru);
1699
1b4f59e3 1700 __free_slab(page->slab_cache, page);
81819f0f
CL
1701}
1702
1703static void free_slab(struct kmem_cache *s, struct page *page)
1704{
5f0d5a3a 1705 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
da9a638c
LJ
1706 struct rcu_head *head;
1707
1708 if (need_reserve_slab_rcu) {
1709 int order = compound_order(page);
1710 int offset = (PAGE_SIZE << order) - s->reserved;
1711
1712 VM_BUG_ON(s->reserved != sizeof(*head));
1713 head = page_address(page) + offset;
1714 } else {
bc4f610d 1715 head = &page->rcu_head;
da9a638c 1716 }
81819f0f
CL
1717
1718 call_rcu(head, rcu_free_slab);
1719 } else
1720 __free_slab(s, page);
1721}
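/*
 * Illustrative sketch, not part of slub.c: a hypothetical cache relying on
 * the RCU free path above. With SLAB_TYPESAFE_BY_RCU an object may be
 * reused for a new allocation immediately after kmem_cache_free(), but the
 * slab page itself is only handed back to the page allocator through
 * rcu_free_slab(), i.e. after a grace period. "struct foo" and "foo_cache"
 * are made-up names.
 */
#include <linux/slab.h>

struct foo {
	int id;
	unsigned int state;
};

static struct kmem_cache *foo_cache;

static int foo_cache_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_TYPESAFE_BY_RCU, NULL);
	return foo_cache ? 0 : -ENOMEM;
}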
1722
1723static void discard_slab(struct kmem_cache *s, struct page *page)
1724{
205ab99d 1725 dec_slabs_node(s, page_to_nid(page), page->objects);
81819f0f
CL
1726 free_slab(s, page);
1727}
1728
1729/*
5cc6eee8 1730 * Management of partially allocated slabs.
81819f0f 1731 */
1e4dd946
SR
1732static inline void
1733__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
81819f0f 1734{
e95eed57 1735 n->nr_partial++;
136333d1 1736 if (tail == DEACTIVATE_TO_TAIL)
7c2e132c
CL
1737 list_add_tail(&page->lru, &n->partial);
1738 else
1739 list_add(&page->lru, &n->partial);
81819f0f
CL
1740}
1741
1e4dd946
SR
1742static inline void add_partial(struct kmem_cache_node *n,
1743 struct page *page, int tail)
62e346a8 1744{
c65c1877 1745 lockdep_assert_held(&n->list_lock);
1e4dd946
SR
1746 __add_partial(n, page, tail);
1747}
c65c1877 1748
1e4dd946
SR
1749static inline void remove_partial(struct kmem_cache_node *n,
1750 struct page *page)
1751{
1752 lockdep_assert_held(&n->list_lock);
52b4b950
DS
1753 list_del(&page->lru);
1754 n->nr_partial--;
1e4dd946
SR
1755}
1756
81819f0f 1757/*
7ced3719
CL
1758 * Remove slab from the partial list, freeze it and
1759 * return the pointer to the freelist.
81819f0f 1760 *
497b66f2 1761 * Returns a list of objects or NULL if it fails.
81819f0f 1762 */
497b66f2 1763static inline void *acquire_slab(struct kmem_cache *s,
acd19fd1 1764 struct kmem_cache_node *n, struct page *page,
633b0764 1765 int mode, int *objects)
81819f0f 1766{
2cfb7455
CL
1767 void *freelist;
1768 unsigned long counters;
1769 struct page new;
1770
c65c1877
PZ
1771 lockdep_assert_held(&n->list_lock);
1772
2cfb7455
CL
1773 /*
1774 * Zap the freelist and set the frozen bit.
1775 * The old freelist is the list of objects for the
1776 * per cpu allocation list.
1777 */
7ced3719
CL
1778 freelist = page->freelist;
1779 counters = page->counters;
1780 new.counters = counters;
633b0764 1781 *objects = new.objects - new.inuse;
23910c50 1782 if (mode) {
7ced3719 1783 new.inuse = page->objects;
23910c50
PE
1784 new.freelist = NULL;
1785 } else {
1786 new.freelist = freelist;
1787 }
2cfb7455 1788
a0132ac0 1789 VM_BUG_ON(new.frozen);
7ced3719 1790 new.frozen = 1;
2cfb7455 1791
7ced3719 1792 if (!__cmpxchg_double_slab(s, page,
2cfb7455 1793 freelist, counters,
02d7633f 1794 new.freelist, new.counters,
7ced3719 1795 "acquire_slab"))
7ced3719 1796 return NULL;
2cfb7455
CL
1797
1798 remove_partial(n, page);
7ced3719 1799 WARN_ON(!freelist);
49e22585 1800 return freelist;
81819f0f
CL
1801}
1802
633b0764 1803static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
8ba00bb6 1804static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
49e22585 1805
81819f0f 1806/*
672bba3a 1807 * Try to allocate a partial slab from a specific node.
81819f0f 1808 */
8ba00bb6
JK
1809static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1810 struct kmem_cache_cpu *c, gfp_t flags)
81819f0f 1811{
49e22585
CL
1812 struct page *page, *page2;
1813 void *object = NULL;
633b0764
JK
1814 int available = 0;
1815 int objects;
81819f0f
CL
1816
1817 /*
1818 * Racy check. If we mistakenly see no partial slabs then we
1819 * just allocate an empty slab. If we mistakenly try to get a
672bba3a
CL
 1820 * partial slab and there is none available then get_partial_node()
1821 * will return NULL.
81819f0f
CL
1822 */
1823 if (!n || !n->nr_partial)
1824 return NULL;
1825
1826 spin_lock(&n->list_lock);
49e22585 1827 list_for_each_entry_safe(page, page2, &n->partial, lru) {
8ba00bb6 1828 void *t;
49e22585 1829
8ba00bb6
JK
1830 if (!pfmemalloc_match(page, flags))
1831 continue;
1832
633b0764 1833 t = acquire_slab(s, n, page, object == NULL, &objects);
49e22585
CL
1834 if (!t)
1835 break;
1836
633b0764 1837 available += objects;
12d79634 1838 if (!object) {
49e22585 1839 c->page = page;
49e22585 1840 stat(s, ALLOC_FROM_PARTIAL);
49e22585 1841 object = t;
49e22585 1842 } else {
633b0764 1843 put_cpu_partial(s, page, 0);
8028dcea 1844 stat(s, CPU_PARTIAL_NODE);
49e22585 1845 }
345c905d 1846 if (!kmem_cache_has_cpu_partial(s)
e6d0e1dc 1847 || available > slub_cpu_partial(s) / 2)
49e22585
CL
1848 break;
1849
497b66f2 1850 }
81819f0f 1851 spin_unlock(&n->list_lock);
497b66f2 1852 return object;
81819f0f
CL
1853}
1854
1855/*
672bba3a 1856 * Get a page from somewhere. Search in increasing NUMA distances.
81819f0f 1857 */
de3ec035 1858static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
acd19fd1 1859 struct kmem_cache_cpu *c)
81819f0f
CL
1860{
1861#ifdef CONFIG_NUMA
1862 struct zonelist *zonelist;
dd1a239f 1863 struct zoneref *z;
54a6eb5c
MG
1864 struct zone *zone;
1865 enum zone_type high_zoneidx = gfp_zone(flags);
497b66f2 1866 void *object;
cc9a6c87 1867 unsigned int cpuset_mems_cookie;
81819f0f
CL
1868
1869 /*
672bba3a
CL
1870 * The defrag ratio allows a configuration of the tradeoffs between
1871 * inter node defragmentation and node local allocations. A lower
1872 * defrag_ratio increases the tendency to do local allocations
1873 * instead of attempting to obtain partial slabs from other nodes.
81819f0f 1874 *
672bba3a
CL
1875 * If the defrag_ratio is set to 0 then kmalloc() always
1876 * returns node local objects. If the ratio is higher then kmalloc()
1877 * may return off node objects because partial slabs are obtained
1878 * from other nodes and filled up.
81819f0f 1879 *
43efd3ea
LP
1880 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
1881 * (which makes defrag_ratio = 1000) then every (well almost)
1882 * allocation will first attempt to defrag slab caches on other nodes.
1883 * This means scanning over all nodes to look for partial slabs which
1884 * may be expensive if we do it every time we are trying to find a slab
672bba3a 1885 * with available objects.
81819f0f 1886 */
9824601e
CL
1887 if (!s->remote_node_defrag_ratio ||
1888 get_cycles() % 1024 > s->remote_node_defrag_ratio)
81819f0f
CL
1889 return NULL;
1890
cc9a6c87 1891 do {
d26914d1 1892 cpuset_mems_cookie = read_mems_allowed_begin();
2a389610 1893 zonelist = node_zonelist(mempolicy_slab_node(), flags);
cc9a6c87
MG
1894 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1895 struct kmem_cache_node *n;
1896
1897 n = get_node(s, zone_to_nid(zone));
1898
dee2f8aa 1899 if (n && cpuset_zone_allowed(zone, flags) &&
cc9a6c87 1900 n->nr_partial > s->min_partial) {
8ba00bb6 1901 object = get_partial_node(s, n, c, flags);
cc9a6c87
MG
1902 if (object) {
1903 /*
d26914d1
MG
1904 * Don't check read_mems_allowed_retry()
1905 * here - if mems_allowed was updated in
1906 * parallel, that was a harmless race
1907 * between allocation and the cpuset
1908 * update
cc9a6c87 1909 */
cc9a6c87
MG
1910 return object;
1911 }
c0ff7453 1912 }
81819f0f 1913 }
d26914d1 1914 } while (read_mems_allowed_retry(cpuset_mems_cookie));
81819f0f
CL
1915#endif
1916 return NULL;
1917}
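/*
 * Illustrative sketch, not part of slub.c: a stand-alone model of the
 * probability implied by the remote_node_defrag_ratio check above. The
 * sysfs file stores ratio * 10, so (as the comment notes) a sysfs value of
 * 100 becomes 1000 internally; get_cycles() % 1024 is treated as roughly
 * uniform over 0..1023.
 */
#include <stdio.h>

int main(void)
{
	int stored_ratio = 1000;	/* sysfs remote_node_defrag_ratio = 100 */
	/* values 1001..1023 make the check bail out and allocate locally */
	double p_local_only = (1023.0 - stored_ratio) / 1024.0;

	printf("remote partial search skipped ~%.1f%% of the time\n",
	       100.0 * p_local_only);	/* ~2.2%: almost every miss searches other nodes */
	return 0;
}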
1918
1919/*
1920 * Get a partial page, lock it and return it.
1921 */
497b66f2 1922static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
acd19fd1 1923 struct kmem_cache_cpu *c)
81819f0f 1924{
497b66f2 1925 void *object;
a561ce00
JK
1926 int searchnode = node;
1927
1928 if (node == NUMA_NO_NODE)
1929 searchnode = numa_mem_id();
1930 else if (!node_present_pages(node))
1931 searchnode = node_to_mem_node(node);
81819f0f 1932
8ba00bb6 1933 object = get_partial_node(s, get_node(s, searchnode), c, flags);
497b66f2
CL
1934 if (object || node != NUMA_NO_NODE)
1935 return object;
81819f0f 1936
acd19fd1 1937 return get_any_partial(s, flags, c);
81819f0f
CL
1938}
1939
8a5ec0ba
CL
1940#ifdef CONFIG_PREEMPT
1941/*
 1942 * Calculate the next globally unique transaction for disambiguation
1943 * during cmpxchg. The transactions start with the cpu number and are then
1944 * incremented by CONFIG_NR_CPUS.
1945 */
1946#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
1947#else
1948/*
 1949 * No preemption supported, therefore there is also no need to check for
 1950 * different cpus.
1951 */
1952#define TID_STEP 1
1953#endif
1954
1955static inline unsigned long next_tid(unsigned long tid)
1956{
1957 return tid + TID_STEP;
1958}
1959
1960static inline unsigned int tid_to_cpu(unsigned long tid)
1961{
1962 return tid % TID_STEP;
1963}
1964
1965static inline unsigned long tid_to_event(unsigned long tid)
1966{
1967 return tid / TID_STEP;
1968}
1969
1970static inline unsigned int init_tid(int cpu)
1971{
1972 return cpu;
1973}
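/*
 * Illustrative sketch, not part of slub.c: a stand-alone model of the
 * transaction id scheme above. The constant 64 stands in for
 * roundup_pow_of_two(CONFIG_NR_CPUS); everything else is hypothetical.
 */
#include <stdio.h>

#define EXAMPLE_TID_STEP 64

int main(void)
{
	unsigned long tid = 3;	/* init_tid(3): cpu 3, event counter 0 */
	int i;

	for (i = 0; i < 3; i++) {
		printf("tid=%lu cpu=%lu event=%lu\n", tid,
		       tid % EXAMPLE_TID_STEP,	/* tid_to_cpu() */
		       tid / EXAMPLE_TID_STEP);	/* tid_to_event() */
		tid += EXAMPLE_TID_STEP;	/* next_tid(): same cpu, next event */
	}
	/* Prints cpu=3 with events 0, 1, 2: the cpu part never changes. */
	return 0;
}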
1974
1975static inline void note_cmpxchg_failure(const char *n,
1976 const struct kmem_cache *s, unsigned long tid)
1977{
1978#ifdef SLUB_DEBUG_CMPXCHG
1979 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1980
f9f58285 1981 pr_info("%s %s: cmpxchg redo ", n, s->name);
8a5ec0ba
CL
1982
1983#ifdef CONFIG_PREEMPT
1984 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
f9f58285 1985 pr_warn("due to cpu change %d -> %d\n",
8a5ec0ba
CL
1986 tid_to_cpu(tid), tid_to_cpu(actual_tid));
1987 else
1988#endif
1989 if (tid_to_event(tid) != tid_to_event(actual_tid))
f9f58285 1990 pr_warn("due to cpu running other code. Event %ld->%ld\n",
8a5ec0ba
CL
1991 tid_to_event(tid), tid_to_event(actual_tid));
1992 else
f9f58285 1993 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
8a5ec0ba
CL
1994 actual_tid, tid, next_tid(tid));
1995#endif
4fdccdfb 1996 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
8a5ec0ba
CL
1997}
1998
788e1aad 1999static void init_kmem_cache_cpus(struct kmem_cache *s)
8a5ec0ba 2000{
8a5ec0ba
CL
2001 int cpu;
2002
2003 for_each_possible_cpu(cpu)
2004 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
8a5ec0ba 2005}
2cfb7455 2006
81819f0f
CL
2007/*
2008 * Remove the cpu slab
2009 */
d0e0ac97 2010static void deactivate_slab(struct kmem_cache *s, struct page *page,
d4ff6d35 2011 void *freelist, struct kmem_cache_cpu *c)
81819f0f 2012{
2cfb7455 2013 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2cfb7455
CL
2014 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
2015 int lock = 0;
2016 enum slab_modes l = M_NONE, m = M_NONE;
2cfb7455 2017 void *nextfree;
136333d1 2018 int tail = DEACTIVATE_TO_HEAD;
2cfb7455
CL
2019 struct page new;
2020 struct page old;
2021
2022 if (page->freelist) {
84e554e6 2023 stat(s, DEACTIVATE_REMOTE_FREES);
136333d1 2024 tail = DEACTIVATE_TO_TAIL;
2cfb7455
CL
2025 }
2026
894b8788 2027 /*
2cfb7455
CL
2028 * Stage one: Free all available per cpu objects back
2029 * to the page freelist while it is still frozen. Leave the
2030 * last one.
2031 *
 2032 * There is no need to take the list_lock because the page
2033 * is still frozen.
2034 */
2035 while (freelist && (nextfree = get_freepointer(s, freelist))) {
2036 void *prior;
2037 unsigned long counters;
2038
2039 do {
2040 prior = page->freelist;
2041 counters = page->counters;
2042 set_freepointer(s, freelist, prior);
2043 new.counters = counters;
2044 new.inuse--;
a0132ac0 2045 VM_BUG_ON(!new.frozen);
2cfb7455 2046
1d07171c 2047 } while (!__cmpxchg_double_slab(s, page,
2cfb7455
CL
2048 prior, counters,
2049 freelist, new.counters,
2050 "drain percpu freelist"));
2051
2052 freelist = nextfree;
2053 }
2054
894b8788 2055 /*
2cfb7455
CL
2056 * Stage two: Ensure that the page is unfrozen while the
2057 * list presence reflects the actual number of objects
2058 * during unfreeze.
2059 *
 2060 * We set up the list membership and then perform a cmpxchg
2061 * with the count. If there is a mismatch then the page
2062 * is not unfrozen but the page is on the wrong list.
2063 *
2064 * Then we restart the process which may have to remove
2065 * the page from the list that we just put it on again
2066 * because the number of objects in the slab may have
2067 * changed.
894b8788 2068 */
2cfb7455 2069redo:
894b8788 2070
2cfb7455
CL
2071 old.freelist = page->freelist;
2072 old.counters = page->counters;
a0132ac0 2073 VM_BUG_ON(!old.frozen);
7c2e132c 2074
2cfb7455
CL
2075 /* Determine target state of the slab */
2076 new.counters = old.counters;
2077 if (freelist) {
2078 new.inuse--;
2079 set_freepointer(s, freelist, old.freelist);
2080 new.freelist = freelist;
2081 } else
2082 new.freelist = old.freelist;
2083
2084 new.frozen = 0;
2085
8a5b20ae 2086 if (!new.inuse && n->nr_partial >= s->min_partial)
2cfb7455
CL
2087 m = M_FREE;
2088 else if (new.freelist) {
2089 m = M_PARTIAL;
2090 if (!lock) {
2091 lock = 1;
2092 /*
 2093 * Taking the spinlock removes the possibility
2094 * that acquire_slab() will see a slab page that
2095 * is frozen
2096 */
2097 spin_lock(&n->list_lock);
2098 }
2099 } else {
2100 m = M_FULL;
2101 if (kmem_cache_debug(s) && !lock) {
2102 lock = 1;
2103 /*
2104 * This also ensures that the scanning of full
2105 * slabs from diagnostic functions will not see
2106 * any frozen slabs.
2107 */
2108 spin_lock(&n->list_lock);
2109 }
2110 }
2111
2112 if (l != m) {
2113
2114 if (l == M_PARTIAL)
2115
2116 remove_partial(n, page);
2117
2118 else if (l == M_FULL)
894b8788 2119
c65c1877 2120 remove_full(s, n, page);
2cfb7455
CL
2121
2122 if (m == M_PARTIAL) {
2123
2124 add_partial(n, page, tail);
136333d1 2125 stat(s, tail);
2cfb7455
CL
2126
2127 } else if (m == M_FULL) {
894b8788 2128
2cfb7455
CL
2129 stat(s, DEACTIVATE_FULL);
2130 add_full(s, n, page);
2131
2132 }
2133 }
2134
2135 l = m;
1d07171c 2136 if (!__cmpxchg_double_slab(s, page,
2cfb7455
CL
2137 old.freelist, old.counters,
2138 new.freelist, new.counters,
2139 "unfreezing slab"))
2140 goto redo;
2141
2cfb7455
CL
2142 if (lock)
2143 spin_unlock(&n->list_lock);
2144
2145 if (m == M_FREE) {
2146 stat(s, DEACTIVATE_EMPTY);
2147 discard_slab(s, page);
2148 stat(s, FREE_SLAB);
894b8788 2149 }
d4ff6d35
WY
2150
2151 c->page = NULL;
2152 c->freelist = NULL;
81819f0f
CL
2153}
2154
d24ac77f
JK
2155/*
2156 * Unfreeze all the cpu partial slabs.
2157 *
59a09917
CL
2158 * This function must be called with interrupts disabled
 2159 * for the cpu using c (or some other mechanism must be in place
 2160 * to guarantee no concurrent accesses).
d24ac77f 2161 */
59a09917
CL
2162static void unfreeze_partials(struct kmem_cache *s,
2163 struct kmem_cache_cpu *c)
49e22585 2164{
345c905d 2165#ifdef CONFIG_SLUB_CPU_PARTIAL
43d77867 2166 struct kmem_cache_node *n = NULL, *n2 = NULL;
9ada1934 2167 struct page *page, *discard_page = NULL;
49e22585
CL
2168
2169 while ((page = c->partial)) {
49e22585
CL
2170 struct page new;
2171 struct page old;
2172
2173 c->partial = page->next;
43d77867
JK
2174
2175 n2 = get_node(s, page_to_nid(page));
2176 if (n != n2) {
2177 if (n)
2178 spin_unlock(&n->list_lock);
2179
2180 n = n2;
2181 spin_lock(&n->list_lock);
2182 }
49e22585
CL
2183
2184 do {
2185
2186 old.freelist = page->freelist;
2187 old.counters = page->counters;
a0132ac0 2188 VM_BUG_ON(!old.frozen);
49e22585
CL
2189
2190 new.counters = old.counters;
2191 new.freelist = old.freelist;
2192
2193 new.frozen = 0;
2194
d24ac77f 2195 } while (!__cmpxchg_double_slab(s, page,
49e22585
CL
2196 old.freelist, old.counters,
2197 new.freelist, new.counters,
2198 "unfreezing slab"));
2199
8a5b20ae 2200 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
9ada1934
SL
2201 page->next = discard_page;
2202 discard_page = page;
43d77867
JK
2203 } else {
2204 add_partial(n, page, DEACTIVATE_TO_TAIL);
2205 stat(s, FREE_ADD_PARTIAL);
49e22585
CL
2206 }
2207 }
2208
2209 if (n)
2210 spin_unlock(&n->list_lock);
9ada1934
SL
2211
2212 while (discard_page) {
2213 page = discard_page;
2214 discard_page = discard_page->next;
2215
2216 stat(s, DEACTIVATE_EMPTY);
2217 discard_slab(s, page);
2218 stat(s, FREE_SLAB);
2219 }
345c905d 2220#endif
49e22585
CL
2221}
2222
2223/*
2224 * Put a page that was just frozen (in __slab_free) into a partial page
0d2d5d40 2225 * slot if available.
49e22585
CL
2226 *
2227 * If we did not find a slot then simply move all the partials to the
2228 * per node partial list.
2229 */
633b0764 2230static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
49e22585 2231{
345c905d 2232#ifdef CONFIG_SLUB_CPU_PARTIAL
49e22585
CL
2233 struct page *oldpage;
2234 int pages;
2235 int pobjects;
2236
d6e0b7fa 2237 preempt_disable();
49e22585
CL
2238 do {
2239 pages = 0;
2240 pobjects = 0;
2241 oldpage = this_cpu_read(s->cpu_slab->partial);
2242
2243 if (oldpage) {
2244 pobjects = oldpage->pobjects;
2245 pages = oldpage->pages;
2246 if (drain && pobjects > s->cpu_partial) {
2247 unsigned long flags;
2248 /*
2249 * partial array is full. Move the existing
2250 * set to the per node partial list.
2251 */
2252 local_irq_save(flags);
59a09917 2253 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
49e22585 2254 local_irq_restore(flags);
e24fc410 2255 oldpage = NULL;
49e22585
CL
2256 pobjects = 0;
2257 pages = 0;
8028dcea 2258 stat(s, CPU_PARTIAL_DRAIN);
49e22585
CL
2259 }
2260 }
2261
2262 pages++;
2263 pobjects += page->objects - page->inuse;
2264
2265 page->pages = pages;
2266 page->pobjects = pobjects;
2267 page->next = oldpage;
2268
d0e0ac97
CG
2269 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2270 != oldpage);
d6e0b7fa
VD
2271 if (unlikely(!s->cpu_partial)) {
2272 unsigned long flags;
2273
2274 local_irq_save(flags);
2275 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2276 local_irq_restore(flags);
2277 }
2278 preempt_enable();
345c905d 2279#endif
49e22585
CL
2280}
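/*
 * Illustrative note, not part of slub.c: the per cpu partial list built by
 * put_cpu_partial() is a singly linked chain through page->next, e.g.
 *
 *   c->partial -> [page C: pages=3, pobjects=41] -> [page B] -> [page A]
 *
 * Only the head page carries the pages/pobjects totals for the whole
 * chain, which is why both counters are recomputed on every push and why
 * the drain check above only inspects the old head's pobjects.
 */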
2281
dfb4f096 2282static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
81819f0f 2283{
84e554e6 2284 stat(s, CPUSLAB_FLUSH);
d4ff6d35 2285 deactivate_slab(s, c->page, c->freelist, c);
c17dda40
CL
2286
2287 c->tid = next_tid(c->tid);
81819f0f
CL
2288}
2289
2290/*
2291 * Flush cpu slab.
6446faa2 2292 *
81819f0f
CL
2293 * Called from IPI handler with interrupts disabled.
2294 */
0c710013 2295static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
81819f0f 2296{
9dfc6e68 2297 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
81819f0f 2298
49e22585
CL
2299 if (likely(c)) {
2300 if (c->page)
2301 flush_slab(s, c);
2302
59a09917 2303 unfreeze_partials(s, c);
49e22585 2304 }
81819f0f
CL
2305}
2306
2307static void flush_cpu_slab(void *d)
2308{
2309 struct kmem_cache *s = d;
81819f0f 2310
dfb4f096 2311 __flush_cpu_slab(s, smp_processor_id());
81819f0f
CL
2312}
2313
a8364d55
GBY
2314static bool has_cpu_slab(int cpu, void *info)
2315{
2316 struct kmem_cache *s = info;
2317 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2318
a93cf07b 2319 return c->page || slub_percpu_partial(c);
a8364d55
GBY
2320}
2321
81819f0f
CL
2322static void flush_all(struct kmem_cache *s)
2323{
a8364d55 2324 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
81819f0f
CL
2325}
2326
a96a87bf
SAS
2327/*
 2328 * Use the cpu hotplug callback to ensure that the cpu slabs are flushed when
2329 * necessary.
2330 */
2331static int slub_cpu_dead(unsigned int cpu)
2332{
2333 struct kmem_cache *s;
2334 unsigned long flags;
2335
2336 mutex_lock(&slab_mutex);
2337 list_for_each_entry(s, &slab_caches, list) {
2338 local_irq_save(flags);
2339 __flush_cpu_slab(s, cpu);
2340 local_irq_restore(flags);
2341 }
2342 mutex_unlock(&slab_mutex);
2343 return 0;
2344}
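/*
 * Illustrative sketch, not part of this function: slub_cpu_dead() is wired
 * up as a cpu hotplug teardown callback during slab initialization, roughly
 * as shown below (the registration itself lives elsewhere in this file).
 */
#include <linux/cpuhotplug.h>

static int example_register_slub_cpu_dead(void)
{
	/* run slub_cpu_dead() whenever a cpu goes offline; no startup hook */
	return cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead",
					 NULL, slub_cpu_dead);
}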
2345
dfb4f096
CL
2346/*
2347 * Check if the objects in a per cpu structure fit numa
2348 * locality expectations.
2349 */
57d437d2 2350static inline int node_match(struct page *page, int node)
dfb4f096
CL
2351{
2352#ifdef CONFIG_NUMA
4d7868e6 2353 if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
dfb4f096
CL
2354 return 0;
2355#endif
2356 return 1;
2357}
2358
9a02d699 2359#ifdef CONFIG_SLUB_DEBUG
781b2ba6
PE
2360static int count_free(struct page *page)
2361{
2362 return page->objects - page->inuse;
2363}
2364
9a02d699
DR
2365static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2366{
2367 return atomic_long_read(&n->total_objects);
2368}
2369#endif /* CONFIG_SLUB_DEBUG */
2370
2371#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
781b2ba6
PE
2372static unsigned long count_partial(struct kmem_cache_node *n,
2373 int (*get_count)(struct page *))
2374{
2375 unsigned long flags;
2376 unsigned long x = 0;
2377 struct page *page;
2378
2379 spin_lock_irqsave(&n->list_lock, flags);
2380 list_for_each_entry(page, &n->partial, lru)
2381 x += get_count(page);
2382 spin_unlock_irqrestore(&n->list_lock, flags);
2383 return x;
2384}
9a02d699 2385#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
26c02cf0 2386
781b2ba6
PE
2387static noinline void
2388slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2389{
9a02d699
DR
2390#ifdef CONFIG_SLUB_DEBUG
2391 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2392 DEFAULT_RATELIMIT_BURST);
781b2ba6 2393 int node;
fa45dc25 2394 struct kmem_cache_node *n;
781b2ba6 2395
9a02d699
DR
2396 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2397 return;
2398
5b3810e5
VB
2399 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2400 nid, gfpflags, &gfpflags);
f9f58285
FF
2401 pr_warn(" cache: %s, object size: %d, buffer size: %d, default order: %d, min order: %d\n",
2402 s->name, s->object_size, s->size, oo_order(s->oo),
2403 oo_order(s->min));
781b2ba6 2404
3b0efdfa 2405 if (oo_order(s->min) > get_order(s->object_size))
f9f58285
FF
2406 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n",
2407 s->name);
fa5ec8a1 2408
fa45dc25 2409 for_each_kmem_cache_node(s, node, n) {
781b2ba6
PE
2410 unsigned long nr_slabs;
2411 unsigned long nr_objs;
2412 unsigned long nr_free;
2413
26c02cf0
AB
2414 nr_free = count_partial(n, count_free);
2415 nr_slabs = node_nr_slabs(n);
2416 nr_objs = node_nr_objs(n);
781b2ba6 2417
f9f58285 2418 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
781b2ba6
PE
2419 node, nr_slabs, nr_objs, nr_free);
2420 }
9a02d699 2421#endif
781b2ba6
PE
2422}
2423
497b66f2
CL
2424static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2425 int node, struct kmem_cache_cpu **pc)
2426{
6faa6833 2427 void *freelist;
188fd063
CL
2428 struct kmem_cache_cpu *c = *pc;
2429 struct page *page;
497b66f2 2430
188fd063 2431 freelist = get_partial(s, flags, node, c);
497b66f2 2432
188fd063
CL
2433 if (freelist)
2434 return freelist;
2435
2436 page = new_slab(s, flags, node);
497b66f2 2437 if (page) {
7c8e0181 2438 c = raw_cpu_ptr(s->cpu_slab);
497b66f2
CL
2439 if (c->page)
2440 flush_slab(s, c);
2441
2442 /*
2443 * No other reference to the page yet so we can
2444 * muck around with it freely without cmpxchg
2445 */
6faa6833 2446 freelist = page->freelist;
497b66f2
CL
2447 page->freelist = NULL;
2448
2449 stat(s, ALLOC_SLAB);
497b66f2
CL
2450 c->page = page;
2451 *pc = c;
2452 } else
6faa6833 2453 freelist = NULL;
497b66f2 2454
6faa6833 2455 return freelist;
497b66f2
CL
2456}
2457
072bb0aa
MG
2458static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2459{
2460 if (unlikely(PageSlabPfmemalloc(page)))
2461 return gfp_pfmemalloc_allowed(gfpflags);
2462
2463 return true;
2464}
2465
213eeb9f 2466/*
d0e0ac97
CG
2467 * Check the page->freelist of a page and either transfer the freelist to the
2468 * per cpu freelist or deactivate the page.
213eeb9f
CL
2469 *
2470 * The page is still frozen if the return value is not NULL.
2471 *
2472 * If this function returns NULL then the page has been unfrozen.
d24ac77f
JK
2473 *
2474 * This function must be called with interrupt disabled.
213eeb9f
CL
2475 */
2476static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2477{
2478 struct page new;
2479 unsigned long counters;
2480 void *freelist;
2481
2482 do {
2483 freelist = page->freelist;
2484 counters = page->counters;
6faa6833 2485
213eeb9f 2486 new.counters = counters;
a0132ac0 2487 VM_BUG_ON(!new.frozen);
213eeb9f
CL
2488
2489 new.inuse = page->objects;
2490 new.frozen = freelist != NULL;
2491
d24ac77f 2492 } while (!__cmpxchg_double_slab(s, page,
213eeb9f
CL
2493 freelist, counters,
2494 NULL, new.counters,
2495 "get_freelist"));
2496
2497 return freelist;
2498}
2499
81819f0f 2500/*
894b8788
CL
2501 * Slow path. The lockless freelist is empty or we need to perform
2502 * debugging duties.
2503 *
894b8788
CL
2504 * Processing is still very fast if new objects have been freed to the
2505 * regular freelist. In that case we simply take over the regular freelist
2506 * as the lockless freelist and zap the regular freelist.
81819f0f 2507 *
894b8788
CL
2508 * If that is not working then we fall back to the partial lists. We take the
2509 * first element of the freelist as the object to allocate now and move the
2510 * rest of the freelist to the lockless freelist.
81819f0f 2511 *
894b8788 2512 * And if we were unable to get a new slab from the partial slab lists then
6446faa2
CL
2513 * we need to allocate a new slab. This is the slowest path since it involves
2514 * a call to the page allocator and the setup of a new slab.
a380a3c7
CL
2515 *
2516 * Version of __slab_alloc to use when we know that interrupts are
2517 * already disabled (which is the case for bulk allocation).
81819f0f 2518 */
a380a3c7 2519static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
ce71e27c 2520 unsigned long addr, struct kmem_cache_cpu *c)
81819f0f 2521{
6faa6833 2522 void *freelist;
f6e7def7 2523 struct page *page;
81819f0f 2524
f6e7def7
CL
2525 page = c->page;
2526 if (!page)
81819f0f 2527 goto new_slab;
49e22585 2528redo:
6faa6833 2529
57d437d2 2530 if (unlikely(!node_match(page, node))) {
a561ce00
JK
2531 int searchnode = node;
2532
2533 if (node != NUMA_NO_NODE && !node_present_pages(node))
2534 searchnode = node_to_mem_node(node);
2535
2536 if (unlikely(!node_match(page, searchnode))) {
2537 stat(s, ALLOC_NODE_MISMATCH);
d4ff6d35 2538 deactivate_slab(s, page, c->freelist, c);
a561ce00
JK
2539 goto new_slab;
2540 }
fc59c053 2541 }
6446faa2 2542
072bb0aa
MG
2543 /*
2544 * By rights, we should be searching for a slab page that was
2545 * PFMEMALLOC but right now, we are losing the pfmemalloc
2546 * information when the page leaves the per-cpu allocator
2547 */
2548 if (unlikely(!pfmemalloc_match(page, gfpflags))) {
d4ff6d35 2549 deactivate_slab(s, page, c->freelist, c);
072bb0aa
MG
2550 goto new_slab;
2551 }
2552
73736e03 2553 /* must check again c->freelist in case of cpu migration or IRQ */
6faa6833
CL
2554 freelist = c->freelist;
2555 if (freelist)
73736e03 2556 goto load_freelist;
03e404af 2557
f6e7def7 2558 freelist = get_freelist(s, page);
6446faa2 2559
6faa6833 2560 if (!freelist) {
03e404af
CL
2561 c->page = NULL;
2562 stat(s, DEACTIVATE_BYPASS);
fc59c053 2563 goto new_slab;
03e404af 2564 }
6446faa2 2565
84e554e6 2566 stat(s, ALLOC_REFILL);
6446faa2 2567
894b8788 2568load_freelist:
507effea
CL
2569 /*
2570 * freelist is pointing to the list of objects to be used.
2571 * page is pointing to the page from which the objects are obtained.
2572 * That page must be frozen for per cpu allocations to work.
2573 */
a0132ac0 2574 VM_BUG_ON(!c->page->frozen);
6faa6833 2575 c->freelist = get_freepointer(s, freelist);
8a5ec0ba 2576 c->tid = next_tid(c->tid);
6faa6833 2577 return freelist;
81819f0f 2578
81819f0f 2579new_slab:
2cfb7455 2580
a93cf07b
WY
2581 if (slub_percpu_partial(c)) {
2582 page = c->page = slub_percpu_partial(c);
2583 slub_set_percpu_partial(c, page);
49e22585 2584 stat(s, CPU_PARTIAL_ALLOC);
49e22585 2585 goto redo;
81819f0f
CL
2586 }
2587
188fd063 2588 freelist = new_slab_objects(s, gfpflags, node, &c);
01ad8a7b 2589
f4697436 2590 if (unlikely(!freelist)) {
9a02d699 2591 slab_out_of_memory(s, gfpflags, node);
f4697436 2592 return NULL;
81819f0f 2593 }
2cfb7455 2594
f6e7def7 2595 page = c->page;
5091b74a 2596 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
4b6f0750 2597 goto load_freelist;
2cfb7455 2598
497b66f2 2599 /* Only entered in the debug case */
d0e0ac97
CG
2600 if (kmem_cache_debug(s) &&
2601 !alloc_debug_processing(s, page, freelist, addr))
497b66f2 2602 goto new_slab; /* Slab failed checks. Next slab needed */
894b8788 2603
d4ff6d35 2604 deactivate_slab(s, page, get_freepointer(s, freelist), c);
6faa6833 2605 return freelist;
894b8788
CL
2606}
2607
a380a3c7
CL
2608/*
2609 * Another one that disabled interrupt and compensates for possible
2610 * cpu changes by refetching the per cpu area pointer.
2611 */
2612static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2613 unsigned long addr, struct kmem_cache_cpu *c)
2614{
2615 void *p;
2616 unsigned long flags;
2617
2618 local_irq_save(flags);
2619#ifdef CONFIG_PREEMPT
2620 /*
2621 * We may have been preempted and rescheduled on a different
2622 * cpu before disabling interrupts. Need to reload cpu area
2623 * pointer.
2624 */
2625 c = this_cpu_ptr(s->cpu_slab);
2626#endif
2627
2628 p = ___slab_alloc(s, gfpflags, node, addr, c);
2629 local_irq_restore(flags);
2630 return p;
2631}
2632
894b8788
CL
2633/*
2634 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2635 * have the fastpath folded into their functions. So no function call
2636 * overhead for requests that can be satisfied on the fastpath.
2637 *
2638 * The fastpath works by first checking if the lockless freelist can be used.
2639 * If not then __slab_alloc is called for slow processing.
2640 *
2641 * Otherwise we can simply pick the next object from the lockless free list.
2642 */
2b847c3c 2643static __always_inline void *slab_alloc_node(struct kmem_cache *s,
ce71e27c 2644 gfp_t gfpflags, int node, unsigned long addr)
894b8788 2645{
03ec0ed5 2646 void *object;
dfb4f096 2647 struct kmem_cache_cpu *c;
57d437d2 2648 struct page *page;
8a5ec0ba 2649 unsigned long tid;
1f84260c 2650
8135be5a
VD
2651 s = slab_pre_alloc_hook(s, gfpflags);
2652 if (!s)
773ff60e 2653 return NULL;
8a5ec0ba 2654redo:
8a5ec0ba
CL
2655 /*
2656 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2657 * enabled. We may switch back and forth between cpus while
2658 * reading from one cpu area. That does not matter as long
2659 * as we end up on the original cpu again when doing the cmpxchg.
7cccd80b 2660 *
9aabf810
JK
2661 * We should guarantee that tid and kmem_cache are retrieved on
2662 * the same cpu. It could be different if CONFIG_PREEMPT so we need
2663 * to check if it is matched or not.
8a5ec0ba 2664 */
9aabf810
JK
2665 do {
2666 tid = this_cpu_read(s->cpu_slab->tid);
2667 c = raw_cpu_ptr(s->cpu_slab);
859b7a0e
MR
2668 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2669 unlikely(tid != READ_ONCE(c->tid)));
9aabf810
JK
2670
2671 /*
2672 * Irqless object alloc/free algorithm used here depends on sequence
2673 * of fetching cpu_slab's data. tid should be fetched before anything
2674 * on c to guarantee that object and page associated with previous tid
2675 * won't be used with current tid. If we fetch tid first, object and
2676 * page could be one associated with next tid and our alloc/free
 2677 * request will fail. In this case, we will retry. So, no problem.
2678 */
2679 barrier();
8a5ec0ba 2680
8a5ec0ba
CL
2681 /*
2682 * The transaction ids are globally unique per cpu and per operation on
 2683 * a per cpu queue. Thus they guarantee that the cmpxchg_double
2684 * occurs on the right processor and that there was no operation on the
2685 * linked list in between.
2686 */
8a5ec0ba 2687
9dfc6e68 2688 object = c->freelist;
57d437d2 2689 page = c->page;
8eae1492 2690 if (unlikely(!object || !node_match(page, node))) {
dfb4f096 2691 object = __slab_alloc(s, gfpflags, node, addr, c);
8eae1492
DH
2692 stat(s, ALLOC_SLOWPATH);
2693 } else {
0ad9500e
ED
2694 void *next_object = get_freepointer_safe(s, object);
2695
8a5ec0ba 2696 /*
25985edc 2697 * The cmpxchg will only match if there was no additional
8a5ec0ba
CL
2698 * operation and if we are on the right processor.
2699 *
d0e0ac97
CG
2700 * The cmpxchg does the following atomically (without lock
2701 * semantics!)
8a5ec0ba
CL
2702 * 1. Relocate first pointer to the current per cpu area.
2703 * 2. Verify that tid and freelist have not been changed
2704 * 3. If they were not changed replace tid and freelist
2705 *
d0e0ac97
CG
2706 * Since this is without lock semantics the protection is only
2707 * against code executing on this cpu *not* from access by
2708 * other cpus.
8a5ec0ba 2709 */
933393f5 2710 if (unlikely(!this_cpu_cmpxchg_double(
8a5ec0ba
CL
2711 s->cpu_slab->freelist, s->cpu_slab->tid,
2712 object, tid,
0ad9500e 2713 next_object, next_tid(tid)))) {
8a5ec0ba
CL
2714
2715 note_cmpxchg_failure("slab_alloc", s, tid);
2716 goto redo;
2717 }
0ad9500e 2718 prefetch_freepointer(s, next_object);
84e554e6 2719 stat(s, ALLOC_FASTPATH);
894b8788 2720 }
8a5ec0ba 2721
74e2134f 2722 if (unlikely(gfpflags & __GFP_ZERO) && object)
3b0efdfa 2723 memset(object, 0, s->object_size);
d07dbea4 2724
03ec0ed5 2725 slab_post_alloc_hook(s, gfpflags, 1, &object);
5a896d9e 2726
894b8788 2727 return object;
81819f0f
CL
2728}
2729
2b847c3c
EG
2730static __always_inline void *slab_alloc(struct kmem_cache *s,
2731 gfp_t gfpflags, unsigned long addr)
2732{
2733 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2734}
2735
81819f0f
CL
2736void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2737{
2b847c3c 2738 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
5b882be4 2739
d0e0ac97
CG
2740 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2741 s->size, gfpflags);
5b882be4
EGM
2742
2743 return ret;
81819f0f
CL
2744}
2745EXPORT_SYMBOL(kmem_cache_alloc);
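/*
 * Illustrative sketch, not part of slub.c: a hypothetical caller exercising
 * the fastpath described above. "struct widget" and "widget_cache" are
 * made-up names; error handling is kept to the bare minimum.
 */
#include <linux/slab.h>

struct widget {
	int id;
	char name[16];
};

static struct kmem_cache *widget_cache;

static int widget_demo(void)
{
	struct widget *w;

	widget_cache = kmem_cache_create("widget", sizeof(struct widget), 0,
					 SLAB_HWCACHE_ALIGN, NULL);
	if (!widget_cache)
		return -ENOMEM;

	/* usually served straight from c->freelist without taking a lock */
	w = kmem_cache_alloc(widget_cache, GFP_KERNEL);
	if (!w) {
		kmem_cache_destroy(widget_cache);
		return -ENOMEM;
	}

	/* fastpath free if we are still on the cpu that owns this slab */
	kmem_cache_free(widget_cache, w);
	kmem_cache_destroy(widget_cache);
	return 0;
}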
2746
0f24f128 2747#ifdef CONFIG_TRACING
4a92379b
RK
2748void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2749{
2b847c3c 2750 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
4a92379b 2751 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
505f5dcb 2752 kasan_kmalloc(s, ret, size, gfpflags);
4a92379b
RK
2753 return ret;
2754}
2755EXPORT_SYMBOL(kmem_cache_alloc_trace);
5b882be4
EGM
2756#endif
2757
81819f0f
CL
2758#ifdef CONFIG_NUMA
2759void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2760{
2b847c3c 2761 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
5b882be4 2762
ca2b84cb 2763 trace_kmem_cache_alloc_node(_RET_IP_, ret,
3b0efdfa 2764 s->object_size, s->size, gfpflags, node);
5b882be4
EGM
2765
2766 return ret;
81819f0f
CL
2767}
2768EXPORT_SYMBOL(kmem_cache_alloc_node);
81819f0f 2769
0f24f128 2770#ifdef CONFIG_TRACING
4a92379b 2771void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
5b882be4 2772 gfp_t gfpflags,
4a92379b 2773 int node, size_t size)
5b882be4 2774{
2b847c3c 2775 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
4a92379b
RK
2776
2777 trace_kmalloc_node(_RET_IP_, ret,
2778 size, s->size, gfpflags, node);
0316bec2 2779
505f5dcb 2780 kasan_kmalloc(s, ret, size, gfpflags);
4a92379b 2781 return ret;
5b882be4 2782}
4a92379b 2783EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
5b882be4 2784#endif
5d1f57e4 2785#endif
5b882be4 2786
81819f0f 2787/*
94e4d712 2788 * Slow path handling. This may still be called frequently since objects
894b8788 2789 * have a longer lifetime than the cpu slabs in most processing loads.
81819f0f 2790 *
894b8788
CL
2791 * So we still attempt to reduce cache line usage. Just take the slab
2792 * lock and free the item. If there is no additional partial page
2793 * handling required then we can return immediately.
81819f0f 2794 */
894b8788 2795static void __slab_free(struct kmem_cache *s, struct page *page,
81084651
JDB
2796 void *head, void *tail, int cnt,
2797 unsigned long addr)
2798
81819f0f
CL
2799{
2800 void *prior;
2cfb7455 2801 int was_frozen;
2cfb7455
CL
2802 struct page new;
2803 unsigned long counters;
2804 struct kmem_cache_node *n = NULL;
61728d1e 2805 unsigned long uninitialized_var(flags);
81819f0f 2806
8a5ec0ba 2807 stat(s, FREE_SLOWPATH);
81819f0f 2808
19c7ff9e 2809 if (kmem_cache_debug(s) &&
282acb43 2810 !free_debug_processing(s, page, head, tail, cnt, addr))
80f08c19 2811 return;
6446faa2 2812
2cfb7455 2813 do {
837d678d
JK
2814 if (unlikely(n)) {
2815 spin_unlock_irqrestore(&n->list_lock, flags);
2816 n = NULL;
2817 }
2cfb7455
CL
2818 prior = page->freelist;
2819 counters = page->counters;
81084651 2820 set_freepointer(s, tail, prior);
2cfb7455
CL
2821 new.counters = counters;
2822 was_frozen = new.frozen;
81084651 2823 new.inuse -= cnt;
837d678d 2824 if ((!new.inuse || !prior) && !was_frozen) {
49e22585 2825
c65c1877 2826 if (kmem_cache_has_cpu_partial(s) && !prior) {
49e22585
CL
2827
2828 /*
d0e0ac97
CG
2829 * Slab was on no list before and will be
2830 * partially empty
2831 * We can defer the list move and instead
2832 * freeze it.
49e22585
CL
2833 */
2834 new.frozen = 1;
2835
c65c1877 2836 } else { /* Needs to be taken off a list */
49e22585 2837
b455def2 2838 n = get_node(s, page_to_nid(page));
49e22585
CL
2839 /*
2840 * Speculatively acquire the list_lock.
2841 * If the cmpxchg does not succeed then we may
2842 * drop the list_lock without any processing.
2843 *
2844 * Otherwise the list_lock will synchronize with
2845 * other processors updating the list of slabs.
2846 */
2847 spin_lock_irqsave(&n->list_lock, flags);
2848
2849 }
2cfb7455 2850 }
81819f0f 2851
2cfb7455
CL
2852 } while (!cmpxchg_double_slab(s, page,
2853 prior, counters,
81084651 2854 head, new.counters,
2cfb7455 2855 "__slab_free"));
81819f0f 2856
2cfb7455 2857 if (likely(!n)) {
49e22585
CL
2858
2859 /*
2860 * If we just froze the page then put it onto the
2861 * per cpu partial list.
2862 */
8028dcea 2863 if (new.frozen && !was_frozen) {
49e22585 2864 put_cpu_partial(s, page, 1);
8028dcea
AS
2865 stat(s, CPU_PARTIAL_FREE);
2866 }
49e22585 2867 /*
2cfb7455
CL
2868 * The list lock was not taken therefore no list
2869 * activity can be necessary.
2870 */
b455def2
L
2871 if (was_frozen)
2872 stat(s, FREE_FROZEN);
2873 return;
2874 }
81819f0f 2875
8a5b20ae 2876 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
837d678d
JK
2877 goto slab_empty;
2878
81819f0f 2879 /*
837d678d
JK
2880 * Objects left in the slab. If it was not on the partial list before
2881 * then add it.
81819f0f 2882 */
345c905d
JK
2883 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2884 if (kmem_cache_debug(s))
c65c1877 2885 remove_full(s, n, page);
837d678d
JK
2886 add_partial(n, page, DEACTIVATE_TO_TAIL);
2887 stat(s, FREE_ADD_PARTIAL);
8ff12cfc 2888 }
80f08c19 2889 spin_unlock_irqrestore(&n->list_lock, flags);
81819f0f
CL
2890 return;
2891
2892slab_empty:
a973e9dd 2893 if (prior) {
81819f0f 2894 /*
6fbabb20 2895 * Slab on the partial list.
81819f0f 2896 */
5cc6eee8 2897 remove_partial(n, page);
84e554e6 2898 stat(s, FREE_REMOVE_PARTIAL);
c65c1877 2899 } else {
6fbabb20 2900 /* Slab must be on the full list */
c65c1877
PZ
2901 remove_full(s, n, page);
2902 }
2cfb7455 2903
80f08c19 2904 spin_unlock_irqrestore(&n->list_lock, flags);
84e554e6 2905 stat(s, FREE_SLAB);
81819f0f 2906 discard_slab(s, page);
81819f0f
CL
2907}
2908
894b8788
CL
2909/*
2910 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2911 * can perform fastpath freeing without additional function calls.
2912 *
2913 * The fastpath is only possible if we are freeing to the current cpu slab
 2914 * of this processor. This is typically the case if we have just allocated
2915 * the item before.
2916 *
2917 * If fastpath is not possible then fall back to __slab_free where we deal
2918 * with all sorts of special processing.
81084651
JDB
2919 *
2920 * Bulk free of a freelist with several objects (all pointing to the
2921 * same page) possible by specifying head and tail ptr, plus objects
2922 * count (cnt). Bulk free indicated by tail pointer being set.
894b8788 2923 */
80a9201a
AP
2924static __always_inline void do_slab_free(struct kmem_cache *s,
2925 struct page *page, void *head, void *tail,
2926 int cnt, unsigned long addr)
894b8788 2927{
81084651 2928 void *tail_obj = tail ? : head;
dfb4f096 2929 struct kmem_cache_cpu *c;
8a5ec0ba 2930 unsigned long tid;
8a5ec0ba
CL
2931redo:
2932 /*
 2933 * Determine the current cpu's per cpu slab.
2934 * The cpu may change afterward. However that does not matter since
2935 * data is retrieved via this pointer. If we are on the same cpu
2ae44005 2936 * during the cmpxchg then the free will succeed.
8a5ec0ba 2937 */
9aabf810
JK
2938 do {
2939 tid = this_cpu_read(s->cpu_slab->tid);
2940 c = raw_cpu_ptr(s->cpu_slab);
859b7a0e
MR
2941 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2942 unlikely(tid != READ_ONCE(c->tid)));
c016b0bd 2943
9aabf810
JK
2944 /* Same with comment on barrier() in slab_alloc_node() */
2945 barrier();
c016b0bd 2946
442b06bc 2947 if (likely(page == c->page)) {
81084651 2948 set_freepointer(s, tail_obj, c->freelist);
8a5ec0ba 2949
933393f5 2950 if (unlikely(!this_cpu_cmpxchg_double(
8a5ec0ba
CL
2951 s->cpu_slab->freelist, s->cpu_slab->tid,
2952 c->freelist, tid,
81084651 2953 head, next_tid(tid)))) {
8a5ec0ba
CL
2954
2955 note_cmpxchg_failure("slab_free", s, tid);
2956 goto redo;
2957 }
84e554e6 2958 stat(s, FREE_FASTPATH);
894b8788 2959 } else
81084651 2960 __slab_free(s, page, head, tail_obj, cnt, addr);
894b8788 2961
894b8788
CL
2962}
2963
80a9201a
AP
2964static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
2965 void *head, void *tail, int cnt,
2966 unsigned long addr)
2967{
2968 slab_free_freelist_hook(s, head, tail);
2969 /*
2970 * slab_free_freelist_hook() could have put the items into quarantine.
2971 * If so, no need to free them.
2972 */
5f0d5a3a 2973 if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
80a9201a
AP
2974 return;
2975 do_slab_free(s, page, head, tail, cnt, addr);
2976}
2977
2978#ifdef CONFIG_KASAN
2979void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
2980{
2981 do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
2982}
2983#endif
2984
81819f0f
CL
2985void kmem_cache_free(struct kmem_cache *s, void *x)
2986{
b9ce5ef4
GC
2987 s = cache_from_obj(s, x);
2988 if (!s)
79576102 2989 return;
81084651 2990 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
ca2b84cb 2991 trace_kmem_cache_free(_RET_IP_, x);
81819f0f
CL
2992}
2993EXPORT_SYMBOL(kmem_cache_free);
2994
d0ecd894 2995struct detached_freelist {
fbd02630 2996 struct page *page;
d0ecd894
JDB
2997 void *tail;
2998 void *freelist;
2999 int cnt;
376bf125 3000 struct kmem_cache *s;
d0ecd894 3001};
fbd02630 3002
d0ecd894
JDB
3003/*
 3004 * This function progressively scans the array of free objects (with
 3005 * a limited look ahead) and extracts objects belonging to the same
 3006 * page. It builds a detached freelist directly within the given
 3007 * page/objects. This can happen without any need for
 3008 * synchronization, because the objects are owned by the running process.
 3009 * The freelist is built up as a single linked list in the objects.
 3010 * The idea is that this detached freelist can then be bulk
 3011 * transferred to the real freelist(s), requiring only a single
 3012 * synchronization primitive. Look ahead in the array is limited for
 3013 * performance reasons.
3014 */
376bf125
JDB
3015static inline
3016int build_detached_freelist(struct kmem_cache *s, size_t size,
3017 void **p, struct detached_freelist *df)
d0ecd894
JDB
3018{
3019 size_t first_skipped_index = 0;
3020 int lookahead = 3;
3021 void *object;
ca257195 3022 struct page *page;
fbd02630 3023
d0ecd894
JDB
3024 /* Always re-init detached_freelist */
3025 df->page = NULL;
fbd02630 3026
d0ecd894
JDB
3027 do {
3028 object = p[--size];
ca257195 3029 /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
d0ecd894 3030 } while (!object && size);
3eed034d 3031
d0ecd894
JDB
3032 if (!object)
3033 return 0;
fbd02630 3034
ca257195
JDB
3035 page = virt_to_head_page(object);
3036 if (!s) {
 3037 /* Handle kmalloc'ed objects */
3038 if (unlikely(!PageSlab(page))) {
3039 BUG_ON(!PageCompound(page));
3040 kfree_hook(object);
4949148a 3041 __free_pages(page, compound_order(page));
ca257195
JDB
3042 p[size] = NULL; /* mark object processed */
3043 return size;
3044 }
3045 /* Derive kmem_cache from object */
3046 df->s = page->slab_cache;
3047 } else {
3048 df->s = cache_from_obj(s, object); /* Support for memcg */
3049 }
376bf125 3050
d0ecd894 3051 /* Start new detached freelist */
ca257195 3052 df->page = page;
376bf125 3053 set_freepointer(df->s, object, NULL);
d0ecd894
JDB
3054 df->tail = object;
3055 df->freelist = object;
3056 p[size] = NULL; /* mark object processed */
3057 df->cnt = 1;
3058
3059 while (size) {
3060 object = p[--size];
3061 if (!object)
3062 continue; /* Skip processed objects */
3063
3064 /* df->page is always set at this point */
3065 if (df->page == virt_to_head_page(object)) {
3066 /* Opportunity build freelist */
376bf125 3067 set_freepointer(df->s, object, df->freelist);
d0ecd894
JDB
3068 df->freelist = object;
3069 df->cnt++;
3070 p[size] = NULL; /* mark object processed */
3071
3072 continue;
fbd02630 3073 }
d0ecd894
JDB
3074
3075 /* Limit look ahead search */
3076 if (!--lookahead)
3077 break;
3078
3079 if (!first_skipped_index)
3080 first_skipped_index = size + 1;
fbd02630 3081 }
d0ecd894
JDB
3082
3083 return first_skipped_index;
3084}
3085
d0ecd894 3086/* Note that interrupts must be enabled when calling this function. */
376bf125 3087void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
d0ecd894
JDB
3088{
3089 if (WARN_ON(!size))
3090 return;
3091
3092 do {
3093 struct detached_freelist df;
3094
3095 size = build_detached_freelist(s, size, p, &df);
84582c8a 3096 if (!df.page)
d0ecd894
JDB
3097 continue;
3098
376bf125 3099 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
d0ecd894 3100 } while (likely(size));
484748f0
CL
3101}
3102EXPORT_SYMBOL(kmem_cache_free_bulk);
3103
994eb764 3104/* Note that interrupts must be enabled when calling this function. */
865762a8
JDB
3105int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3106 void **p)
484748f0 3107{
994eb764
JDB
3108 struct kmem_cache_cpu *c;
3109 int i;
3110
03ec0ed5
JDB
3111 /* memcg and kmem_cache debug support */
3112 s = slab_pre_alloc_hook(s, flags);
3113 if (unlikely(!s))
3114 return false;
994eb764
JDB
3115 /*
3116 * Drain objects in the per cpu slab, while disabling local
 3117 * IRQs, which protects against PREEMPT and interrupt
 3118 * handlers invoking the normal fastpath.
3119 */
3120 local_irq_disable();
3121 c = this_cpu_ptr(s->cpu_slab);
3122
3123 for (i = 0; i < size; i++) {
3124 void *object = c->freelist;
3125
ebe909e0 3126 if (unlikely(!object)) {
ebe909e0
JDB
3127 /*
 3128 * Invoking the slow path likely has the side-effect
 3129 * of re-populating the per CPU c->freelist
3130 */
87098373 3131 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
ebe909e0 3132 _RET_IP_, c);
87098373
CL
3133 if (unlikely(!p[i]))
3134 goto error;
3135
ebe909e0
JDB
3136 c = this_cpu_ptr(s->cpu_slab);
3137 continue; /* goto for-loop */
3138 }
994eb764
JDB
3139 c->freelist = get_freepointer(s, object);
3140 p[i] = object;
3141 }
3142 c->tid = next_tid(c->tid);
3143 local_irq_enable();
3144
3145 /* Clear memory outside IRQ disabled fastpath loop */
3146 if (unlikely(flags & __GFP_ZERO)) {
3147 int j;
3148
3149 for (j = 0; j < i; j++)
3150 memset(p[j], 0, s->object_size);
3151 }
3152
03ec0ed5
JDB
3153 /* memcg and kmem_cache debug support */
3154 slab_post_alloc_hook(s, flags, size, p);
865762a8 3155 return i;
87098373 3156error:
87098373 3157 local_irq_enable();
03ec0ed5
JDB
3158 slab_post_alloc_hook(s, flags, i, p);
3159 __kmem_cache_free_bulk(s, i, p);
865762a8 3160 return 0;
484748f0
CL
3161}
3162EXPORT_SYMBOL(kmem_cache_alloc_bulk);
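/*
 * Illustrative sketch, not part of slub.c: hypothetical use of the bulk API
 * above. One IRQ-disabled section refills the whole array instead of 16
 * separate fastpath calls, and kmem_cache_free_bulk() groups the objects
 * into per-page detached freelists as described earlier. "item_cache" is a
 * made-up name.
 */
#include <linux/kernel.h>
#include <linux/slab.h>

static int bulk_demo(struct kmem_cache *item_cache)
{
	void *objs[16];
	int got;

	got = kmem_cache_alloc_bulk(item_cache, GFP_KERNEL,
				    ARRAY_SIZE(objs), objs);
	if (!got)
		return -ENOMEM;	/* returns 0 on failure, the count on success */

	/* ... use objs[0..got-1] ... */

	kmem_cache_free_bulk(item_cache, got, objs);
	return 0;
}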
3163
3164
81819f0f 3165/*
672bba3a
CL
3166 * Object placement in a slab is made very easy because we always start at
3167 * offset 0. If we tune the size of the object to the alignment then we can
3168 * get the required alignment by putting one properly sized object after
3169 * another.
81819f0f
CL
3170 *
3171 * Notice that the allocation order determines the sizes of the per cpu
 3172 * caches. Each processor always has one slab available for allocations.
3173 * Increasing the allocation order reduces the number of times that slabs
672bba3a 3174 * must be moved on and off the partial lists and is therefore a factor in
81819f0f 3175 * locking overhead.
81819f0f
CL
3176 */
3177
3178/*
 3179 * Minimum / Maximum order of slab pages. This influences locking overhead
3180 * and slab fragmentation. A higher order reduces the number of partial slabs
3181 * and increases the number of allocations possible without having to
3182 * take the list_lock.
3183 */
3184static int slub_min_order;
114e9e89 3185static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
9b2cd506 3186static int slub_min_objects;
81819f0f 3187
81819f0f
CL
3188/*
 3189 * Calculate the order of allocation given a slab object size.
3190 *
672bba3a
CL
3191 * The order of allocation has significant impact on performance and other
3192 * system components. Generally order 0 allocations should be preferred since
3193 * order 0 does not cause fragmentation in the page allocator. Larger objects
 3194 * can be problematic to put into order 0 slabs because there may be too much
c124f5b5 3195 * unused space left. We go to a higher order if more than 1/16th of the slab
672bba3a
CL
3196 * would be wasted.
3197 *
3198 * In order to reach satisfactory performance we must ensure that a minimum
3199 * number of objects is in one slab. Otherwise we may generate too much
3200 * activity on the partial lists which requires taking the list_lock. This is
 3201 * less of a concern for large slabs though, which are rarely used.
81819f0f 3202 *
672bba3a
CL
3203 * slub_max_order specifies the order where we begin to stop considering the
3204 * number of objects in a slab as critical. If we reach slub_max_order then
3205 * we try to keep the page order as low as possible. So we accept more waste
3206 * of space in favor of a small page order.
81819f0f 3207 *
672bba3a
CL
3208 * Higher order allocations also allow the placement of more objects in a
3209 * slab and thereby reduce object handling overhead. If the user has
 3210 * requested a higher minimum order then we start with that one instead of
3211 * the smallest order which will fit the object.
81819f0f 3212 */
5e6d444e 3213static inline int slab_order(int size, int min_objects,
ab9a0f19 3214 int max_order, int fract_leftover, int reserved)
81819f0f
CL
3215{
3216 int order;
3217 int rem;
6300ea75 3218 int min_order = slub_min_order;
81819f0f 3219
ab9a0f19 3220 if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
210b5c06 3221 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
39b26464 3222
9f835703 3223 for (order = max(min_order, get_order(min_objects * size + reserved));
5e6d444e 3224 order <= max_order; order++) {
81819f0f 3225
5e6d444e 3226 unsigned long slab_size = PAGE_SIZE << order;
81819f0f 3227
ab9a0f19 3228 rem = (slab_size - reserved) % size;
81819f0f 3229
5e6d444e 3230 if (rem <= slab_size / fract_leftover)
81819f0f 3231 break;
81819f0f 3232 }
672bba3a 3233
81819f0f
CL
3234 return order;
3235}
3236
ab9a0f19 3237static inline int calculate_order(int size, int reserved)
5e6d444e
CL
3238{
3239 int order;
3240 int min_objects;
3241 int fraction;
e8120ff1 3242 int max_objects;
5e6d444e
CL
3243
3244 /*
 3245 * Attempt to find the best configuration for a slab. This
3246 * works by first attempting to generate a layout with
3247 * the best configuration and backing off gradually.
3248 *
422ff4d7 3249 * First we increase the acceptable waste in a slab. Then
5e6d444e
CL
3250 * we reduce the minimum objects required in a slab.
3251 */
3252 min_objects = slub_min_objects;
9b2cd506
CL
3253 if (!min_objects)
3254 min_objects = 4 * (fls(nr_cpu_ids) + 1);
ab9a0f19 3255 max_objects = order_objects(slub_max_order, size, reserved);
e8120ff1
ZY
3256 min_objects = min(min_objects, max_objects);
3257
5e6d444e 3258 while (min_objects > 1) {
c124f5b5 3259 fraction = 16;
5e6d444e
CL
3260 while (fraction >= 4) {
3261 order = slab_order(size, min_objects,
ab9a0f19 3262 slub_max_order, fraction, reserved);
5e6d444e
CL
3263 if (order <= slub_max_order)
3264 return order;
3265 fraction /= 2;
3266 }
5086c389 3267 min_objects--;
5e6d444e
CL
3268 }
3269
3270 /*
3271 * We were unable to place multiple objects in a slab. Now
3272 * lets see if we can place a single object there.
3273 */
ab9a0f19 3274 order = slab_order(size, 1, slub_max_order, 1, reserved);
5e6d444e
CL
3275 if (order <= slub_max_order)
3276 return order;
3277
3278 /*
3279 * Doh this slab cannot be placed using slub_max_order.
3280 */
ab9a0f19 3281 order = slab_order(size, 1, MAX_ORDER, 1, reserved);
818cf590 3282 if (order < MAX_ORDER)
5e6d444e
CL
3283 return order;
3284 return -ENOSYS;
3285}
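/*
 * Illustrative sketch, not part of slub.c: a stand-alone model of the
 * slab_order() waste check above for one concrete case (a 700-byte object,
 * no reserved bytes, fraction 16). Numbers are examples only.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096, size = 700, reserved = 0;
	int order;

	for (order = 0; order <= 3; order++) {
		unsigned long usable = (page_size << order) - reserved;
		unsigned long objects = usable / size;
		unsigned long rem = usable % size;

		printf("order %d: %lu objects, %lu bytes wasted (limit %lu)\n",
		       order, objects, rem, (page_size << order) / 16);
	}
	/*
	 * order 0: 5 objects, 596 wasted, limit 256  -> rejected
	 * order 1: 11 objects, 492 wasted, limit 512 -> accepted at fraction 16
	 */
	return 0;
}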
3286
5595cffc 3287static void
4053497d 3288init_kmem_cache_node(struct kmem_cache_node *n)
81819f0f
CL
3289{
3290 n->nr_partial = 0;
81819f0f
CL
3291 spin_lock_init(&n->list_lock);
3292 INIT_LIST_HEAD(&n->partial);
8ab1372f 3293#ifdef CONFIG_SLUB_DEBUG
0f389ec6 3294 atomic_long_set(&n->nr_slabs, 0);
02b71b70 3295 atomic_long_set(&n->total_objects, 0);
643b1138 3296 INIT_LIST_HEAD(&n->full);
8ab1372f 3297#endif
81819f0f
CL
3298}
3299
55136592 3300static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
4c93c355 3301{
6c182dc0 3302 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
95a05b42 3303 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
4c93c355 3304
8a5ec0ba 3305 /*
d4d84fef
CM
3306 * Must align to double word boundary for the double cmpxchg
3307 * instructions to work; see __pcpu_double_call_return_bool().
8a5ec0ba 3308 */
d4d84fef
CM
3309 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3310 2 * sizeof(void *));
8a5ec0ba
CL
3311
3312 if (!s->cpu_slab)
3313 return 0;
3314
3315 init_kmem_cache_cpus(s);
4c93c355 3316
8a5ec0ba 3317 return 1;
4c93c355 3318}
4c93c355 3319
51df1142
CL
3320static struct kmem_cache *kmem_cache_node;
3321
81819f0f
CL
3322/*
3323 * No kmalloc_node yet so do it by hand. We know that this is the first
3324 * slab on the node for this slabcache. There are no concurrent accesses
3325 * possible.
3326 *
721ae22a
ZYW
3327 * Note that this function only works on the kmem_cache_node
3328 * when allocating for the kmem_cache_node. This is used for bootstrapping
4c93c355 3329 * memory on a fresh node that has no slab structures yet.
81819f0f 3330 */
55136592 3331static void early_kmem_cache_node_alloc(int node)
81819f0f
CL
3332{
3333 struct page *page;
3334 struct kmem_cache_node *n;
3335
51df1142 3336 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
81819f0f 3337
51df1142 3338 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
81819f0f
CL
3339
3340 BUG_ON(!page);
a2f92ee7 3341 if (page_to_nid(page) != node) {
f9f58285
FF
3342 pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3343 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
a2f92ee7
CL
3344 }
3345
81819f0f
CL
3346 n = page->freelist;
3347 BUG_ON(!n);
51df1142 3348 page->freelist = get_freepointer(kmem_cache_node, n);
e6e82ea1 3349 page->inuse = 1;
8cb0a506 3350 page->frozen = 0;
51df1142 3351 kmem_cache_node->node[node] = n;
8ab1372f 3352#ifdef CONFIG_SLUB_DEBUG
f7cb1933 3353 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
51df1142 3354 init_tracking(kmem_cache_node, n);
8ab1372f 3355#endif
505f5dcb
AP
3356 kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
3357 GFP_KERNEL);
4053497d 3358 init_kmem_cache_node(n);
51df1142 3359 inc_slabs_node(kmem_cache_node, node, page->objects);
6446faa2 3360
67b6c900 3361 /*
1e4dd946
SR
3362 * No locks need to be taken here as it has just been
3363 * initialized and there is no concurrent access.
67b6c900 3364 */
1e4dd946 3365 __add_partial(n, page, DEACTIVATE_TO_HEAD);
81819f0f
CL
3366}
3367
3368static void free_kmem_cache_nodes(struct kmem_cache *s)
3369{
3370 int node;
fa45dc25 3371 struct kmem_cache_node *n;
81819f0f 3372
fa45dc25 3373 for_each_kmem_cache_node(s, node, n) {
81819f0f 3374 s->node[node] = NULL;
ea37df54 3375 kmem_cache_free(kmem_cache_node, n);
81819f0f
CL
3376 }
3377}
3378
52b4b950
DS
3379void __kmem_cache_release(struct kmem_cache *s)
3380{
210e7a43 3381 cache_random_seq_destroy(s);
52b4b950
DS
3382 free_percpu(s->cpu_slab);
3383 free_kmem_cache_nodes(s);
3384}
3385
55136592 3386static int init_kmem_cache_nodes(struct kmem_cache *s)
81819f0f
CL
3387{
3388 int node;
81819f0f 3389
f64dc58c 3390 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
3391 struct kmem_cache_node *n;
3392
73367bd8 3393 if (slab_state == DOWN) {
55136592 3394 early_kmem_cache_node_alloc(node);
73367bd8
AD
3395 continue;
3396 }
51df1142 3397 n = kmem_cache_alloc_node(kmem_cache_node,
55136592 3398 GFP_KERNEL, node);
81819f0f 3399
73367bd8
AD
3400 if (!n) {
3401 free_kmem_cache_nodes(s);
3402 return 0;
81819f0f 3403 }
73367bd8 3404
4053497d 3405 init_kmem_cache_node(n);
ea37df54 3406 s->node[node] = n;
81819f0f
CL
3407 }
3408 return 1;
3409}
81819f0f 3410
c0bdb232 3411static void set_min_partial(struct kmem_cache *s, unsigned long min)
3b89d7d8
DR
3412{
3413 if (min < MIN_PARTIAL)
3414 min = MIN_PARTIAL;
3415 else if (min > MAX_PARTIAL)
3416 min = MAX_PARTIAL;
3417 s->min_partial = min;
3418}
3419
e6d0e1dc
WY
3420static void set_cpu_partial(struct kmem_cache *s)
3421{
3422#ifdef CONFIG_SLUB_CPU_PARTIAL
3423 /*
3424 * cpu_partial determines the maximum number of objects kept in the
3425 * per cpu partial lists of a processor.
3426 *
3427 * Per cpu partial lists mainly contain slabs that just have one
3428 * object freed. If they are used for allocation then they can be
3429 * filled up again with minimal effort. The slab will never hit the
3430 * per node partial lists and therefore no locking will be required.
3431 *
3432 * This setting also determines
3433 *
3434 * A) The number of objects from per cpu partial slabs dumped to the
3435 * per node list when we reach the limit.
3436 * B) The number of objects in cpu partial slabs to extract from the
3437 * per node list when we run out of per cpu objects. We only fetch
3438 * 50% to keep some capacity around for frees.
3439 */
3440 if (!kmem_cache_has_cpu_partial(s))
3441 s->cpu_partial = 0;
3442 else if (s->size >= PAGE_SIZE)
3443 s->cpu_partial = 2;
3444 else if (s->size >= 1024)
3445 s->cpu_partial = 6;
3446 else if (s->size >= 256)
3447 s->cpu_partial = 13;
3448 else
3449 s->cpu_partial = 30;
3450#endif
3451}
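/*
 * A small standalone sketch of the step function above, assuming a 4 KiB
 * page size; EXAMPLE_PAGE_SIZE and example_cpu_partial() are invented
 * names, not kernel symbols.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u

static unsigned int example_cpu_partial(unsigned int object_size)
{
        if (object_size >= EXAMPLE_PAGE_SIZE)
                return 2;
        if (object_size >= 1024)
                return 6;
        if (object_size >= 256)
                return 13;
        return 30;
}

int main(void)
{
        unsigned int sizes[] = { 64, 256, 1024, 8192 };

        for (unsigned int i = 0; i < 4; i++)    /* prints 30, 13, 6, 2 */
                printf("size %5u -> cpu_partial %u\n",
                       sizes[i], example_cpu_partial(sizes[i]));
        return 0;
}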
3452
81819f0f
CL
3453/*
3454 * calculate_sizes() determines the order and the distribution of data within
3455 * a slab object.
3456 */
06b285dc 3457static int calculate_sizes(struct kmem_cache *s, int forced_order)
81819f0f 3458{
d50112ed 3459 slab_flags_t flags = s->flags;
80a9201a 3460 size_t size = s->object_size;
834f3d11 3461 int order;
81819f0f 3462
d8b42bf5
CL
3463 /*
3464 * Round up object size to the next word boundary. We can only
3465 * place the free pointer at word boundaries and this determines
3466 * the possible location of the free pointer.
3467 */
3468 size = ALIGN(size, sizeof(void *));
3469
3470#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
3471 /*
3472 * Determine if we can poison the object itself. If the user of
3473 * the slab may touch the object after free or before allocation
3474 * then we should never poison the object itself.
3475 */
5f0d5a3a 3476 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
c59def9f 3477 !s->ctor)
81819f0f
CL
3478 s->flags |= __OBJECT_POISON;
3479 else
3480 s->flags &= ~__OBJECT_POISON;
3481
81819f0f
CL
3482
3483 /*
672bba3a 3484 * If we are Redzoning then check if there is some space between the
81819f0f 3485 * end of the object and the free pointer. If not then add an
672bba3a 3486 * additional word to have some bytes to store Redzone information.
81819f0f 3487 */
3b0efdfa 3488 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
81819f0f 3489 size += sizeof(void *);
41ecc55b 3490#endif
81819f0f
CL
3491
3492 /*
672bba3a
CL
3493 * With that we have determined the number of bytes in actual use
3494 * by the object. This is the potential offset to the free pointer.
81819f0f
CL
3495 */
3496 s->inuse = size;
3497
5f0d5a3a 3498 if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
c59def9f 3499 s->ctor)) {
81819f0f
CL
3500 /*
3501 * Relocate free pointer after the object if it is not
3502 * permitted to overwrite the first word of the object on
3503 * kmem_cache_free.
3504 *
3505 * This is the case if we do RCU, have a constructor or
3506 * destructor or are poisoning the objects.
3507 */
3508 s->offset = size;
3509 size += sizeof(void *);
3510 }
3511
c12b3c62 3512#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
3513 if (flags & SLAB_STORE_USER)
3514 /*
3515 * Need to store information about allocs and frees after
3516 * the object.
3517 */
3518 size += 2 * sizeof(struct track);
80a9201a 3519#endif
81819f0f 3520
80a9201a
AP
3521 kasan_cache_create(s, &size, &s->flags);
3522#ifdef CONFIG_SLUB_DEBUG
d86bd1be 3523 if (flags & SLAB_RED_ZONE) {
81819f0f
CL
3524 /*
3525 * Add some empty padding so that we can catch
3526 * overwrites from earlier objects rather than let
3527 * tracking information or the free pointer be
0211a9c8 3528 * corrupted if a user writes before the start
81819f0f
CL
3529 * of the object.
3530 */
3531 size += sizeof(void *);
d86bd1be
JK
3532
3533 s->red_left_pad = sizeof(void *);
3534 s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3535 size += s->red_left_pad;
3536 }
41ecc55b 3537#endif
672bba3a 3538
81819f0f
CL
3539 /*
3540 * SLUB stores one object immediately after another beginning from
3541 * offset 0. In order to align the objects we have to simply size
3542 * each object to conform to the alignment.
3543 */
45906855 3544 size = ALIGN(size, s->align);
81819f0f 3545 s->size = size;
06b285dc
CL
3546 if (forced_order >= 0)
3547 order = forced_order;
3548 else
ab9a0f19 3549 order = calculate_order(size, s->reserved);
81819f0f 3550
834f3d11 3551 if (order < 0)
81819f0f
CL
3552 return 0;
3553
b7a49f0d 3554 s->allocflags = 0;
834f3d11 3555 if (order)
b7a49f0d
CL
3556 s->allocflags |= __GFP_COMP;
3557
3558 if (s->flags & SLAB_CACHE_DMA)
2c59dd65 3559 s->allocflags |= GFP_DMA;
b7a49f0d
CL
3560
3561 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3562 s->allocflags |= __GFP_RECLAIMABLE;
3563
81819f0f
CL
3564 /*
3565 * Determine the number of objects per slab
3566 */
ab9a0f19
LJ
3567 s->oo = oo_make(order, size, s->reserved);
3568 s->min = oo_make(get_order(size), size, s->reserved);
205ab99d
CL
3569 if (oo_objects(s->oo) > oo_objects(s->max))
3570 s->max = s->oo;
81819f0f 3571
834f3d11 3572 return !!oo_objects(s->oo);
81819f0f
CL
3573}
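/*
 * A rough userspace model of the slot-size arithmetic above for one
 * concrete case: poisoning, red zoning and user tracking enabled, no
 * constructor and no RCU, 64-bit pointers. EX_TRACK_SIZE merely stands in
 * for sizeof(struct track) and does not match the real kernel value; all
 * ex_*/EX_* names are invented for illustration.
 */
#include <stdio.h>
#include <stddef.h>

/* Round x up to a multiple of a (a must be a power of two). */
#define EX_ALIGN(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Stand-in for sizeof(struct track); the real kernel value differs. */
#define EX_TRACK_SIZE   24u

static size_t ex_slot_size(size_t object_size, size_t align)
{
        size_t size = EX_ALIGN(object_size, sizeof(void *));

        /* Red zone word after the object, only if nothing was padded yet. */
        if (size == object_size)
                size += sizeof(void *);

        /* With poisoning the freelist pointer cannot live inside the
         * object, so add a word for it after the red zone. */
        size += sizeof(void *);

        /* Allocation and free tracking records. */
        size += 2 * EX_TRACK_SIZE;

        /* Padding word plus the left red zone in front of the object. */
        size += sizeof(void *);
        size += EX_ALIGN(sizeof(void *), align);

        return EX_ALIGN(size, align);
}

int main(void)
{
        /* A 24-byte object with 8-byte alignment ends up in a 104-byte slot. */
        printf("slot size: %zu bytes\n", ex_slot_size(24, 8));
        return 0;
}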
3574
d50112ed 3575static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
81819f0f 3576{
8a13a4cc 3577 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
ab9a0f19 3578 s->reserved = 0;
2482ddec
KC
3579#ifdef CONFIG_SLAB_FREELIST_HARDENED
3580 s->random = get_random_long();
3581#endif
81819f0f 3582
5f0d5a3a 3583 if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
da9a638c 3584 s->reserved = sizeof(struct rcu_head);
81819f0f 3585
06b285dc 3586 if (!calculate_sizes(s, -1))
81819f0f 3587 goto error;
3de47213
DR
3588 if (disable_higher_order_debug) {
3589 /*
3590 * Disable debugging flags that store metadata if the min slab
3591 * order increased.
3592 */
3b0efdfa 3593 if (get_order(s->size) > get_order(s->object_size)) {
3de47213
DR
3594 s->flags &= ~DEBUG_METADATA_FLAGS;
3595 s->offset = 0;
3596 if (!calculate_sizes(s, -1))
3597 goto error;
3598 }
3599 }
81819f0f 3600
2565409f
HC
3601#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3602 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
149daaf3 3603 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
b789ef51
CL
3604 /* Enable fast mode */
3605 s->flags |= __CMPXCHG_DOUBLE;
3606#endif
3607
3b89d7d8
DR
3608 /*
3609 * The larger the object size is, the more pages we want on the partial
3610 * list to avoid pounding the page allocator excessively.
3611 */
49e22585
CL
3612 set_min_partial(s, ilog2(s->size) / 2);
3613
e6d0e1dc 3614 set_cpu_partial(s);
49e22585 3615
81819f0f 3616#ifdef CONFIG_NUMA
e2cb96b7 3617 s->remote_node_defrag_ratio = 1000;
81819f0f 3618#endif
210e7a43
TG
3619
3620 /* Initialize the pre-computed randomized freelist if slab is up */
3621 if (slab_state >= UP) {
3622 if (init_cache_random_seq(s))
3623 goto error;
3624 }
3625
55136592 3626 if (!init_kmem_cache_nodes(s))
dfb4f096 3627 goto error;
81819f0f 3628
55136592 3629 if (alloc_kmem_cache_cpus(s))
278b1bb1 3630 return 0;
ff12059e 3631
4c93c355 3632 free_kmem_cache_nodes(s);
81819f0f
CL
3633error:
3634 if (flags & SLAB_PANIC)
756a025f
JP
3635 panic("Cannot create slab %s size=%lu realsize=%u order=%u offset=%u flags=%lx\n",
3636 s->name, (unsigned long)s->size, s->size,
4fd0b46e 3637 oo_order(s->oo), s->offset, (unsigned long)flags);
278b1bb1 3638 return -EINVAL;
81819f0f 3639}
81819f0f 3640
33b12c38
CL
3641static void list_slab_objects(struct kmem_cache *s, struct page *page,
3642 const char *text)
3643{
3644#ifdef CONFIG_SLUB_DEBUG
3645 void *addr = page_address(page);
3646 void *p;
a5dd5c11
NK
3647 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3648 sizeof(long), GFP_ATOMIC);
bbd7d57b
ED
3649 if (!map)
3650 return;
945cf2b6 3651 slab_err(s, page, text, s->name);
33b12c38 3652 slab_lock(page);
33b12c38 3653
5f80b13a 3654 get_map(s, page, map);
33b12c38
CL
3655 for_each_object(p, s, addr, page->objects) {
3656
3657 if (!test_bit(slab_index(p, s, addr), map)) {
f9f58285 3658 pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
33b12c38
CL
3659 print_tracking(s, p);
3660 }
3661 }
3662 slab_unlock(page);
bbd7d57b 3663 kfree(map);
33b12c38
CL
3664#endif
3665}
3666
81819f0f 3667/*
599870b1 3668 * Attempt to free all partial slabs on a node.
52b4b950
DS
3669 * This is called from __kmem_cache_shutdown(). We must take list_lock
3670 * because a sysfs file might still access the partial list after the shutdown has started.
81819f0f 3671 */
599870b1 3672static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
81819f0f 3673{
60398923 3674 LIST_HEAD(discard);
81819f0f
CL
3675 struct page *page, *h;
3676
52b4b950
DS
3677 BUG_ON(irqs_disabled());
3678 spin_lock_irq(&n->list_lock);
33b12c38 3679 list_for_each_entry_safe(page, h, &n->partial, lru) {
81819f0f 3680 if (!page->inuse) {
52b4b950 3681 remove_partial(n, page);
60398923 3682 list_add(&page->lru, &discard);
33b12c38
CL
3683 } else {
3684 list_slab_objects(s, page,
52b4b950 3685 "Objects remaining in %s on __kmem_cache_shutdown()");
599870b1 3686 }
33b12c38 3687 }
52b4b950 3688 spin_unlock_irq(&n->list_lock);
60398923
CW
3689
3690 list_for_each_entry_safe(page, h, &discard, lru)
3691 discard_slab(s, page);
81819f0f
CL
3692}
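/*
 * free_partial() moves empty slabs to a private list under the node lock
 * and frees them only after the lock is dropped. Below is a hedged
 * userspace sketch of that pattern with a pthread mutex; every ex_* name
 * is invented for illustration.
 */
#include <pthread.h>
#include <stdlib.h>

struct ex_node {
        struct ex_node *next;
        int in_use;
};

struct ex_pool {
        pthread_mutex_t lock;
        struct ex_node *head;           /* shared list, protected by lock */
};

static void ex_reap(struct ex_pool *p)
{
        struct ex_node *discard = NULL, **pp, *n;

        pthread_mutex_lock(&p->lock);
        pp = &p->head;
        while ((n = *pp) != NULL) {
                if (!n->in_use) {
                        *pp = n->next;          /* unlink from the shared list */
                        n->next = discard;      /* push onto the private list */
                        discard = n;
                } else {
                        pp = &n->next;
                }
        }
        pthread_mutex_unlock(&p->lock);

        while (discard) {                       /* expensive work outside the lock */
                n = discard;
                discard = n->next;
                free(n);
        }
}

int main(void)
{
        struct ex_pool pool = { PTHREAD_MUTEX_INITIALIZER, NULL };

        ex_reap(&pool);                         /* nothing to free yet */
        return 0;
}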
3693
3694/*
672bba3a 3695 * Release all resources used by a slab cache.
81819f0f 3696 */
52b4b950 3697int __kmem_cache_shutdown(struct kmem_cache *s)
81819f0f
CL
3698{
3699 int node;
fa45dc25 3700 struct kmem_cache_node *n;
81819f0f
CL
3701
3702 flush_all(s);
81819f0f 3703 /* Attempt to free all objects */
fa45dc25 3704 for_each_kmem_cache_node(s, node, n) {
599870b1
CL
3705 free_partial(s, n);
3706 if (n->nr_partial || slabs_node(s, node))
81819f0f
CL
3707 return 1;
3708 }
bf5eb3de 3709 sysfs_slab_remove(s);
81819f0f
CL
3710 return 0;
3711}
3712
81819f0f
CL
3713/********************************************************************
3714 * Kmalloc subsystem
3715 *******************************************************************/
3716
81819f0f
CL
3717static int __init setup_slub_min_order(char *str)
3718{
06428780 3719 get_option(&str, &slub_min_order);
81819f0f
CL
3720
3721 return 1;
3722}
3723
3724__setup("slub_min_order=", setup_slub_min_order);
3725
3726static int __init setup_slub_max_order(char *str)
3727{
06428780 3728 get_option(&str, &slub_max_order);
818cf590 3729 slub_max_order = min(slub_max_order, MAX_ORDER - 1);
81819f0f
CL
3730
3731 return 1;
3732}
3733
3734__setup("slub_max_order=", setup_slub_max_order);
3735
3736static int __init setup_slub_min_objects(char *str)
3737{
06428780 3738 get_option(&str, &slub_min_objects);
81819f0f
CL
3739
3740 return 1;
3741}
3742
3743__setup("slub_min_objects=", setup_slub_min_objects);
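/*
 * Usage note (illustrative values only): these __setup() handlers take
 * their values from the kernel command line, so a boot entry such as
 *
 *      slub_min_objects=16 slub_max_order=1
 *
 * would request at least 16 objects per slab while capping the slab page
 * order at 1.
 */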
3744
81819f0f
CL
3745void *__kmalloc(size_t size, gfp_t flags)
3746{
aadb4bc4 3747 struct kmem_cache *s;
5b882be4 3748 void *ret;
81819f0f 3749
95a05b42 3750 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
eada35ef 3751 return kmalloc_large(size, flags);
aadb4bc4 3752
2c59dd65 3753 s = kmalloc_slab(size, flags);
aadb4bc4
CL
3754
3755 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
3756 return s;
3757
2b847c3c 3758 ret = slab_alloc(s, flags, _RET_IP_);
5b882be4 3759
ca2b84cb 3760 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
5b882be4 3761
505f5dcb 3762 kasan_kmalloc(s, ret, size, flags);
0316bec2 3763
5b882be4 3764 return ret;
81819f0f
CL
3765}
3766EXPORT_SYMBOL(__kmalloc);
3767
5d1f57e4 3768#ifdef CONFIG_NUMA
f619cfe1
CL
3769static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3770{
b1eeab67 3771 struct page *page;
e4f7c0b4 3772 void *ptr = NULL;
f619cfe1 3773
75f296d9 3774 flags |= __GFP_COMP;
4949148a 3775 page = alloc_pages_node(node, flags, get_order(size));
f619cfe1 3776 if (page)
e4f7c0b4
CM
3777 ptr = page_address(page);
3778
d56791b3 3779 kmalloc_large_node_hook(ptr, size, flags);
e4f7c0b4 3780 return ptr;
f619cfe1
CL
3781}
3782
81819f0f
CL
3783void *__kmalloc_node(size_t size, gfp_t flags, int node)
3784{
aadb4bc4 3785 struct kmem_cache *s;
5b882be4 3786 void *ret;
81819f0f 3787
95a05b42 3788 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
5b882be4
EGM
3789 ret = kmalloc_large_node(size, flags, node);
3790
ca2b84cb
EGM
3791 trace_kmalloc_node(_RET_IP_, ret,
3792 size, PAGE_SIZE << get_order(size),
3793 flags, node);
5b882be4
EGM
3794
3795 return ret;
3796 }
aadb4bc4 3797
2c59dd65 3798 s = kmalloc_slab(size, flags);
aadb4bc4
CL
3799
3800 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
3801 return s;
3802
2b847c3c 3803 ret = slab_alloc_node(s, flags, node, _RET_IP_);
5b882be4 3804
ca2b84cb 3805 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
5b882be4 3806
505f5dcb 3807 kasan_kmalloc(s, ret, size, flags);
0316bec2 3808
5b882be4 3809 return ret;
81819f0f
CL
3810}
3811EXPORT_SYMBOL(__kmalloc_node);
3812#endif
3813
ed18adc1
KC
3814#ifdef CONFIG_HARDENED_USERCOPY
3815/*
afcc90f8
KC
3816 * Rejects incorrectly sized objects and objects that are to be copied
3817 * to/from userspace but do not fall entirely within the containing slab
3818 * cache's usercopy region.
ed18adc1
KC
3819 *
3820 * Returns NULL if check passes, otherwise const char * to name of cache
3821 * to indicate an error.
3822 */
f4e6e289
KC
3823void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
3824 bool to_user)
ed18adc1
KC
3825{
3826 struct kmem_cache *s;
3827 unsigned long offset;
3828 size_t object_size;
3829
3830 /* Find object and usable object size. */
3831 s = page->slab_cache;
ed18adc1
KC
3832
3833 /* Reject impossible pointers. */
3834 if (ptr < page_address(page))
f4e6e289
KC
3835 usercopy_abort("SLUB object not in SLUB page?!", NULL,
3836 to_user, 0, n);
ed18adc1
KC
3837
3838 /* Find offset within object. */
3839 offset = (ptr - page_address(page)) % s->size;
3840
3841 /* Adjust for redzone and reject if within the redzone. */
3842 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
3843 if (offset < s->red_left_pad)
f4e6e289
KC
3844 usercopy_abort("SLUB object in left red zone",
3845 s->name, to_user, offset, n);
ed18adc1
KC
3846 offset -= s->red_left_pad;
3847 }
3848
afcc90f8
KC
3849 /* Allow address range falling entirely within usercopy region. */
3850 if (offset >= s->useroffset &&
3851 offset - s->useroffset <= s->usersize &&
3852 n <= s->useroffset - offset + s->usersize)
f4e6e289 3853 return;
ed18adc1 3854
afcc90f8
KC
3855 /*
3856 * If the copy is still within the allocated object, produce
3857 * a warning instead of rejecting the copy. This is intended
3858 * to be a temporary method to find any missing usercopy
3859 * whitelists.
3860 */
3861 object_size = slab_ksize(s);
2d891fbc
KC
3862 if (usercopy_fallback &&
3863 offset <= object_size && n <= object_size - offset) {
afcc90f8
KC
3864 usercopy_warn("SLUB object", s->name, to_user, offset, n);
3865 return;
3866 }
ed18adc1 3867
f4e6e289 3868 usercopy_abort("SLUB object", s->name, to_user, offset, n);
ed18adc1
KC
3869}
3870#endif /* CONFIG_HARDENED_USERCOPY */
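/*
 * A standalone sketch of the offset arithmetic used above, under the
 * assumption of a simple linear slab layout; the ex_* names and the
 * example values in main() are invented, and page_base stands in for
 * page_address(page).
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct ex_cache {
        size_t slot_size;       /* stride between objects (s->size) */
        size_t red_left_pad;    /* left red zone, 0 when debugging is off */
        size_t useroffset;      /* start of the usercopy window */
        size_t usersize;        /* length of the usercopy window */
};

/* Return true if copying n bytes at ptr should be allowed. */
static bool ex_usercopy_ok(const struct ex_cache *c, uintptr_t page_base,
                           uintptr_t ptr, size_t n)
{
        size_t offset;

        if (ptr < page_base)
                return false;                   /* impossible pointer */

        offset = (ptr - page_base) % c->slot_size;

        if (c->red_left_pad) {
                if (offset < c->red_left_pad)
                        return false;           /* inside the left red zone */
                offset -= c->red_left_pad;
        }

        /* The whole range must fall inside the usercopy window. */
        return offset >= c->useroffset &&
               offset - c->useroffset <= c->usersize &&
               n <= c->useroffset - offset + c->usersize;
}

int main(void)
{
        /* Hypothetical cache: 128-byte slots, 16-byte window at offset 0. */
        struct ex_cache c = { 128, 0, 0, 16 };

        return ex_usercopy_ok(&c, 0x1000, 0x1000 + 128 + 4, 8) ? 0 : 1;
}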
3871
0316bec2 3872static size_t __ksize(const void *object)
81819f0f 3873{
272c1d21 3874 struct page *page;
81819f0f 3875
ef8b4520 3876 if (unlikely(object == ZERO_SIZE_PTR))
272c1d21
CL
3877 return 0;
3878
294a80a8 3879 page = virt_to_head_page(object);
294a80a8 3880
76994412
PE
3881 if (unlikely(!PageSlab(page))) {
3882 WARN_ON(!PageCompound(page));
294a80a8 3883 return PAGE_SIZE << compound_order(page);
76994412 3884 }
81819f0f 3885
1b4f59e3 3886 return slab_ksize(page->slab_cache);
81819f0f 3887}
0316bec2
AR
3888
3889size_t ksize(const void *object)
3890{
3891 size_t size = __ksize(object);
3892 /* We assume that ksize callers could use the whole allocated area,
4ebb31a4
AP
3893 * so we need to unpoison this area.
3894 */
3895 kasan_unpoison_shadow(object, size);
0316bec2
AR
3896 return size;
3897}
b1aabecd 3898EXPORT_SYMBOL(ksize);
81819f0f
CL
3899
3900void kfree(const void *x)
3901{
81819f0f 3902 struct page *page;
5bb983b0 3903 void *object = (void *)x;
81819f0f 3904
2121db74
PE
3905 trace_kfree(_RET_IP_, x);
3906
2408c550 3907 if (unlikely(ZERO_OR_NULL_PTR(x)))
81819f0f
CL
3908 return;
3909
b49af68f 3910 page = virt_to_head_page(x);
aadb4bc4 3911 if (unlikely(!PageSlab(page))) {
0937502a 3912 BUG_ON(!PageCompound(page));
47adccce 3913 kfree_hook(object);
4949148a 3914 __free_pages(page, compound_order(page));
aadb4bc4
CL
3915 return;
3916 }
81084651 3917 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
81819f0f
CL
3918}
3919EXPORT_SYMBOL(kfree);
3920
832f37f5
VD
3921#define SHRINK_PROMOTE_MAX 32
3922
2086d26a 3923/*
832f37f5
VD
3924 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
3925 * up most to the head of the partial lists. New allocations will then
3926 * fill those up and thus they can be removed from the partial lists.
672bba3a
CL
3927 *
3928 * The slabs with the least items are placed last. This results in them
3929 * being allocated from last, increasing the chance that the last objects
3930 * are freed in them.
2086d26a 3931 */
c9fc5864 3932int __kmem_cache_shrink(struct kmem_cache *s)
2086d26a
CL
3933{
3934 int node;
3935 int i;
3936 struct kmem_cache_node *n;
3937 struct page *page;
3938 struct page *t;
832f37f5
VD
3939 struct list_head discard;
3940 struct list_head promote[SHRINK_PROMOTE_MAX];
2086d26a 3941 unsigned long flags;
ce3712d7 3942 int ret = 0;
2086d26a 3943
2086d26a 3944 flush_all(s);
fa45dc25 3945 for_each_kmem_cache_node(s, node, n) {
832f37f5
VD
3946 INIT_LIST_HEAD(&discard);
3947 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
3948 INIT_LIST_HEAD(promote + i);
2086d26a
CL
3949
3950 spin_lock_irqsave(&n->list_lock, flags);
3951
3952 /*
832f37f5 3953 * Build lists of slabs to discard or promote.
2086d26a 3954 *
672bba3a
CL
3955 * Note that concurrent frees may occur while we hold the
3956 * list_lock. page->inuse here is the upper limit.
2086d26a
CL
3957 */
3958 list_for_each_entry_safe(page, t, &n->partial, lru) {
832f37f5
VD
3959 int free = page->objects - page->inuse;
3960
3961 /* Do not reread page->inuse */
3962 barrier();
3963
3964 /* We do not keep full slabs on the list */
3965 BUG_ON(free <= 0);
3966
3967 if (free == page->objects) {
3968 list_move(&page->lru, &discard);
69cb8e6b 3969 n->nr_partial--;
832f37f5
VD
3970 } else if (free <= SHRINK_PROMOTE_MAX)
3971 list_move(&page->lru, promote + free - 1);
2086d26a
CL
3972 }
3973
2086d26a 3974 /*
832f37f5
VD
3975 * Promote the slabs filled up most to the head of the
3976 * partial list.
2086d26a 3977 */
832f37f5
VD
3978 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
3979 list_splice(promote + i, &n->partial);
2086d26a 3980
2086d26a 3981 spin_unlock_irqrestore(&n->list_lock, flags);
69cb8e6b
CL
3982
3983 /* Release empty slabs */
832f37f5 3984 list_for_each_entry_safe(page, t, &discard, lru)
69cb8e6b 3985 discard_slab(s, page);
ce3712d7
VD
3986
3987 if (slabs_node(s, node))
3988 ret = 1;
2086d26a
CL
3989 }
3990
ce3712d7 3991 return ret;
2086d26a 3992}
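/*
 * A simplified userspace model of the bucketing above: slabs are grouped
 * by how many objects are free, completely free slabs are discarded, and
 * the rest are emitted fullest first. Slabs with more than EX_PROMOTE_MAX
 * free objects are ignored here to keep the sketch short; all ex_*/EX_*
 * names and the sample numbers are invented.
 */
#include <stdio.h>

#define EX_PROMOTE_MAX  32      /* mirrors SHRINK_PROMOTE_MAX above */

static void ex_shrink_order(const int *free_cnt, int nr, int capacity)
{
        int bucket, i;

        /* Fullest slabs (fewest free objects) come out first. */
        for (bucket = 1; bucket <= EX_PROMOTE_MAX; bucket++)
                for (i = 0; i < nr; i++)
                        if (free_cnt[i] == bucket && free_cnt[i] != capacity)
                                printf("keep slab %d (%d free)\n", i, free_cnt[i]);

        /* Completely free slabs are released back to the page allocator. */
        for (i = 0; i < nr; i++)
                if (free_cnt[i] == capacity)
                        printf("discard slab %d (empty)\n", i);
}

int main(void)
{
        int free_cnt[] = { 3, 8, 1, 8, 5 };     /* 8 objects per slab */

        ex_shrink_order(free_cnt, 5, 8);
        return 0;
}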
2086d26a 3993
c9fc5864 3994#ifdef CONFIG_MEMCG
01fb58bc
TH
3995static void kmemcg_cache_deact_after_rcu(struct kmem_cache *s)
3996{
50862ce7
TH
3997 /*
3998 * Called with all the locks held after a sched RCU grace period.
3999 * Even if @s becomes empty after shrinking, we can't know that @s
4000 * doesn't have allocations already in-flight and thus can't
4001 * destroy @s until the associated memcg is released.
4002 *
4003 * However, let's remove the sysfs files for empty caches here.
4004 * Each cache has a lot of interface files which aren't
4005 * particularly useful for empty draining caches; otherwise, we can
4006 * easily end up with millions of unnecessary sysfs files on
4007 * systems which have a lot of memory and transient cgroups.
4008 */
4009 if (!__kmem_cache_shrink(s))
4010 sysfs_slab_remove(s);
01fb58bc
TH
4011}
4012
c9fc5864
TH
4013void __kmemcg_cache_deactivate(struct kmem_cache *s)
4014{
4015 /*
4016 * Disable empty slabs caching. Used to avoid pinning offline
4017 * memory cgroups by kmem pages that can be freed.
4018 */
e6d0e1dc 4019 slub_set_cpu_partial(s, 0);
c9fc5864
TH
4020 s->min_partial = 0;
4021
4022 /*
4023 * s->cpu_partial is checked locklessly (see put_cpu_partial), so
01fb58bc 4024 * we have to make sure the change is visible before shrinking.
c9fc5864 4025 */
01fb58bc 4026 slab_deactivate_memcg_cache_rcu_sched(s, kmemcg_cache_deact_after_rcu);
c9fc5864
TH
4027}
4028#endif
4029
b9049e23
YG
4030static int slab_mem_going_offline_callback(void *arg)
4031{
4032 struct kmem_cache *s;
4033
18004c5d 4034 mutex_lock(&slab_mutex);
b9049e23 4035 list_for_each_entry(s, &slab_caches, list)
c9fc5864 4036 __kmem_cache_shrink(s);
18004c5d 4037 mutex_unlock(&slab_mutex);
b9049e23
YG
4038
4039 return 0;
4040}
4041
4042static void slab_mem_offline_callback(void *arg)
4043{
4044 struct kmem_cache_node *n;
4045 struct kmem_cache *s;
4046 struct memory_notify *marg = arg;
4047 int offline_node;
4048
b9d5ab25 4049 offline_node = marg->status_change_nid_normal;
b9049e23
YG
4050
4051 /*
4052 * If the node still has available memory then we still need the
4053 * kmem_cache_node structure for it, so there is nothing to free.
4054 */
4055 if (offline_node < 0)
4056 return;
4057
18004c5d 4058 mutex_lock(&slab_mutex);
b9049e23
YG
4059 list_for_each_entry(s, &slab_caches, list) {
4060 n = get_node(s, offline_node);
4061 if (n) {
4062 /*
4063 * if n->nr_slabs > 0, slabs still exist on the node
4064 * that is going down. We were unable to free them,
c9404c9c 4065 * and offline_pages() function shouldn't call this
b9049e23
YG
4066 * callback. So, we must fail.
4067 */
0f389ec6 4068 BUG_ON(slabs_node(s, offline_node));
b9049e23
YG
4069
4070 s->node[offline_node] = NULL;
8de66a0c 4071 kmem_cache_free(kmem_cache_node, n);
b9049e23
YG
4072 }
4073 }
18004c5d 4074 mutex_unlock(&slab_mutex);
b9049e23
YG
4075}
4076
4077static int slab_mem_going_online_callback(void *arg)
4078{
4079 struct kmem_cache_node *n;
4080 struct kmem_cache *s;
4081 struct memory_notify *marg = arg;
b9d5ab25 4082 int nid = marg->status_change_nid_normal;
b9049e23
YG
4083 int ret = 0;
4084
4085 /*
4086 * If the node's memory is already available, then kmem_cache_node is
4087 * already created. Nothing to do.
4088 */
4089 if (nid < 0)
4090 return 0;
4091
4092 /*
0121c619 4093 * We are bringing a node online. No memory is available yet. We must
b9049e23
YG
4094 * allocate a kmem_cache_node structure in order to bring the node
4095 * online.
4096 */
18004c5d 4097 mutex_lock(&slab_mutex);
b9049e23
YG
4098 list_for_each_entry(s, &slab_caches, list) {
4099 /*
4100 * XXX: kmem_cache_alloc_node will fallback to other nodes
4101 * since memory is not yet available from the node that
4102 * is brought up.
4103 */
8de66a0c 4104 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
b9049e23
YG
4105 if (!n) {
4106 ret = -ENOMEM;
4107 goto out;
4108 }
4053497d 4109 init_kmem_cache_node(n);
b9049e23
YG
4110 s->node[nid] = n;
4111 }
4112out:
18004c5d 4113 mutex_unlock(&slab_mutex);
b9049e23
YG
4114 return ret;
4115}
4116
4117static int slab_memory_callback(struct notifier_block *self,
4118 unsigned long action, void *arg)
4119{
4120 int ret = 0;
4121
4122 switch (action) {
4123 case MEM_GOING_ONLINE:
4124 ret = slab_mem_going_online_callback(arg);
4125 break;
4126 case MEM_GOING_OFFLINE:
4127 ret = slab_mem_going_offline_callback(arg);
4128 break;
4129 case MEM_OFFLINE:
4130 case MEM_CANCEL_ONLINE:
4131 slab_mem_offline_callback(arg);
4132 break;
4133 case MEM_ONLINE:
4134 case MEM_CANCEL_OFFLINE:
4135 break;
4136 }
dc19f9db
KH
4137 if (ret)
4138 ret = notifier_from_errno(ret);
4139 else
4140 ret = NOTIFY_OK;
b9049e23
YG
4141 return ret;
4142}
4143
3ac38faa
AM
4144static struct notifier_block slab_memory_callback_nb = {
4145 .notifier_call = slab_memory_callback,
4146 .priority = SLAB_CALLBACK_PRI,
4147};
b9049e23 4148
81819f0f
CL
4149/********************************************************************
4150 * Basic setup of slabs
4151 *******************************************************************/
4152
51df1142
CL
4153/*
4154 * Used for early kmem_cache structures that were allocated using
dffb4d60
CL
4155 * the page allocator. Allocate them properly then fix up the pointers
4156 * that may be pointing to the wrong kmem_cache structure.
51df1142
CL
4157 */
4158
dffb4d60 4159static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
51df1142
CL
4160{
4161 int node;
dffb4d60 4162 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
fa45dc25 4163 struct kmem_cache_node *n;
51df1142 4164
dffb4d60 4165 memcpy(s, static_cache, kmem_cache->object_size);
51df1142 4166
7d557b3c
GC
4167 /*
4168 * This runs very early, and only the boot processor is supposed to be
4169 * up. Even if it weren't true, IRQs are not up so we couldn't fire
4170 * IPIs around.
4171 */
4172 __flush_cpu_slab(s, smp_processor_id());
fa45dc25 4173 for_each_kmem_cache_node(s, node, n) {
51df1142
CL
4174 struct page *p;
4175
fa45dc25
CL
4176 list_for_each_entry(p, &n->partial, lru)
4177 p->slab_cache = s;
51df1142 4178
607bf324 4179#ifdef CONFIG_SLUB_DEBUG
fa45dc25
CL
4180 list_for_each_entry(p, &n->full, lru)
4181 p->slab_cache = s;
51df1142 4182#endif
51df1142 4183 }
f7ce3190 4184 slab_init_memcg_params(s);
dffb4d60 4185 list_add(&s->list, &slab_caches);
510ded33 4186 memcg_link_cache(s);
dffb4d60 4187 return s;
51df1142
CL
4188}
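/*
 * A hedged userspace sketch of the fix-up performed by bootstrap(): the
 * statically allocated boot-time descriptor is copied into properly
 * allocated memory and every back-pointer that still refers to the static
 * copy is re-pointed at the final one. The ex_* types are invented and
 * only model the page->slab_cache back-reference.
 */
#include <stdlib.h>
#include <string.h>

struct ex_cache;

struct ex_page {
        struct ex_cache *owner;         /* back-pointer, like page->slab_cache */
        struct ex_page *next;
};

struct ex_cache {
        const char *name;
        struct ex_page *pages;          /* pages owned by this cache */
};

static struct ex_cache *ex_bootstrap(struct ex_cache *static_cache)
{
        struct ex_cache *s = malloc(sizeof(*s));
        struct ex_page *p;

        if (!s)
                return NULL;
        memcpy(s, static_cache, sizeof(*s));
        for (p = s->pages; p; p = p->next)
                p->owner = s;           /* re-point pages at the final copy */
        return s;
}

int main(void)
{
        static struct ex_page page0 = { NULL, NULL };
        static struct ex_cache boot = { "boot-cache", &page0 };
        struct ex_cache *live;

        page0.owner = &boot;
        live = ex_bootstrap(&boot);
        return (live && page0.owner == live) ? 0 : 1;
}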
4189
81819f0f
CL
4190void __init kmem_cache_init(void)
4191{
dffb4d60
CL
4192 static __initdata struct kmem_cache boot_kmem_cache,
4193 boot_kmem_cache_node;
51df1142 4194
fc8d8620
SG
4195 if (debug_guardpage_minorder())
4196 slub_max_order = 0;
4197
dffb4d60
CL
4198 kmem_cache_node = &boot_kmem_cache_node;
4199 kmem_cache = &boot_kmem_cache;
51df1142 4200
dffb4d60 4201 create_boot_cache(kmem_cache_node, "kmem_cache_node",
8eb8284b 4202 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
b9049e23 4203
3ac38faa 4204 register_hotmemory_notifier(&slab_memory_callback_nb);
81819f0f
CL
4205
4206 /* Able to allocate the per node structures */
4207 slab_state = PARTIAL;
4208
dffb4d60
CL
4209 create_boot_cache(kmem_cache, "kmem_cache",
4210 offsetof(struct kmem_cache, node) +
4211 nr_node_ids * sizeof(struct kmem_cache_node *),
8eb8284b 4212 SLAB_HWCACHE_ALIGN, 0, 0);
8a13a4cc 4213
dffb4d60 4214 kmem_cache = bootstrap(&boot_kmem_cache);
81819f0f 4215
51df1142
CL
4216 /*
4217 * Allocate kmem_cache_node properly from the kmem_cache slab.
4218 * kmem_cache_node is separately allocated so no need to
4219 * update any list pointers.
4220 */
dffb4d60 4221 kmem_cache_node = bootstrap(&boot_kmem_cache_node);
51df1142
CL
4222
4223 /* Now we can use the kmem_cache to allocate kmalloc slabs */
34cc6990 4224 setup_kmalloc_cache_index_table();
f97d5f63 4225 create_kmalloc_caches(0);
81819f0f 4226
210e7a43
TG
4227 /* Setup random freelists for each cache */
4228 init_freelist_randomization();
4229
a96a87bf
SAS
4230 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4231 slub_cpu_dead);
81819f0f 4232
9b130ad5 4233 pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%u, Nodes=%d\n",
f97d5f63 4234 cache_line_size(),
81819f0f
CL
4235 slub_min_order, slub_max_order, slub_min_objects,
4236 nr_cpu_ids, nr_node_ids);
4237}
4238
7e85ee0c
PE
4239void __init kmem_cache_init_late(void)
4240{
7e85ee0c
PE
4241}
4242
2633d7a0 4243struct kmem_cache *
a44cb944 4244__kmem_cache_alias(const char *name, size_t size, size_t align,
d50112ed 4245 slab_flags_t flags, void (*ctor)(void *))
81819f0f 4246{
426589f5 4247 struct kmem_cache *s, *c;
81819f0f 4248
a44cb944 4249 s = find_mergeable(size, align, flags, name, ctor);
81819f0f
CL
4250 if (s) {
4251 s->refcount++;
84d0ddd6 4252
81819f0f
CL
4253 /*
4254 * Adjust the object sizes so that we clear
4255 * the complete object on kzalloc.
4256 */
3b0efdfa 4257 s->object_size = max(s->object_size, (int)size);
81819f0f 4258 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
6446faa2 4259
426589f5 4260 for_each_memcg_cache(c, s) {
84d0ddd6
VD
4261 c->object_size = s->object_size;
4262 c->inuse = max_t(int, c->inuse,
4263 ALIGN(size, sizeof(void *)));
4264 }
4265
7b8f3b66 4266 if (sysfs_slab_alias(s, name)) {
7b8f3b66 4267 s->refcount--;
cbb79694 4268 s = NULL;
7b8f3b66 4269 }
a0e1d1be 4270 }
6446faa2 4271
cbb79694
CL
4272 return s;
4273}
84c1cf62 4274
d50112ed 4275int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
cbb79694 4276{
aac3a166
PE
4277 int err;
4278
4279 err = kmem_cache_open(s, flags);
4280 if (err)
4281 return err;
20cea968 4282
45530c44
CL
4283 /* Mutex is not taken during early boot */
4284 if (slab_state <= UP)
4285 return 0;
4286
107dab5c 4287 memcg_propagate_slab_attrs(s);
aac3a166 4288 err = sysfs_slab_add(s);
aac3a166 4289 if (err)
52b4b950 4290 __kmem_cache_release(s);
20cea968 4291
aac3a166 4292 return err;
81819f0f 4293}
81819f0f 4294
ce71e27c 4295void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
81819f0f 4296{
aadb4bc4 4297 struct kmem_cache *s;
94b528d0 4298 void *ret;
aadb4bc4 4299
95a05b42 4300 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
eada35ef
PE
4301 return kmalloc_large(size, gfpflags);
4302
2c59dd65 4303 s = kmalloc_slab(size, gfpflags);
81819f0f 4304
2408c550 4305 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 4306 return s;
81819f0f 4307
2b847c3c 4308 ret = slab_alloc(s, gfpflags, caller);
94b528d0 4309
25985edc 4310 /* Honor the call site pointer we received. */
ca2b84cb 4311 trace_kmalloc(caller, ret, size, s->size, gfpflags);
94b528d0
EGM
4312
4313 return ret;
81819f0f
CL
4314}
4315
5d1f57e4 4316#ifdef CONFIG_NUMA
81819f0f 4317void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
ce71e27c 4318 int node, unsigned long caller)
81819f0f 4319{
aadb4bc4 4320 struct kmem_cache *s;
94b528d0 4321 void *ret;
aadb4bc4 4322
95a05b42 4323 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
d3e14aa3
XF
4324 ret = kmalloc_large_node(size, gfpflags, node);
4325
4326 trace_kmalloc_node(caller, ret,
4327 size, PAGE_SIZE << get_order(size),
4328 gfpflags, node);
4329
4330 return ret;
4331 }
eada35ef 4332
2c59dd65 4333 s = kmalloc_slab(size, gfpflags);
81819f0f 4334
2408c550 4335 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 4336 return s;
81819f0f 4337
2b847c3c 4338 ret = slab_alloc_node(s, gfpflags, node, caller);
94b528d0 4339
25985edc 4340 /* Honor the call site pointer we received. */
ca2b84cb 4341 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
94b528d0
EGM
4342
4343 return ret;
81819f0f 4344}
5d1f57e4 4345#endif
81819f0f 4346
ab4d5ed5 4347#ifdef CONFIG_SYSFS
205ab99d
CL
4348static int count_inuse(struct page *page)
4349{
4350 return page->inuse;
4351}
4352
4353static int count_total(struct page *page)
4354{
4355 return page->objects;
4356}
ab4d5ed5 4357#endif
205ab99d 4358
ab4d5ed5 4359#ifdef CONFIG_SLUB_DEBUG
434e245d
CL
4360static int validate_slab(struct kmem_cache *s, struct page *page,
4361 unsigned long *map)
53e15af0
CL
4362{
4363 void *p;
a973e9dd 4364 void *addr = page_address(page);
53e15af0
CL
4365
4366 if (!check_slab(s, page) ||
4367 !on_freelist(s, page, NULL))
4368 return 0;
4369
4370 /* Now we know that a valid freelist exists */
39b26464 4371 bitmap_zero(map, page->objects);
53e15af0 4372
5f80b13a
CL
4373 get_map(s, page, map);
4374 for_each_object(p, s, addr, page->objects) {
4375 if (test_bit(slab_index(p, s, addr), map))
4376 if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4377 return 0;
53e15af0
CL
4378 }
4379
224a88be 4380 for_each_object(p, s, addr, page->objects)
7656c72b 4381 if (!test_bit(slab_index(p, s, addr), map))
37d57443 4382 if (!check_object(s, page, p, SLUB_RED_ACTIVE))
53e15af0
CL
4383 return 0;
4384 return 1;
4385}
4386
434e245d
CL
4387static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4388 unsigned long *map)
53e15af0 4389{
881db7fb
CL
4390 slab_lock(page);
4391 validate_slab(s, page, map);
4392 slab_unlock(page);
53e15af0
CL
4393}
4394
434e245d
CL
4395static int validate_slab_node(struct kmem_cache *s,
4396 struct kmem_cache_node *n, unsigned long *map)
53e15af0
CL
4397{
4398 unsigned long count = 0;
4399 struct page *page;
4400 unsigned long flags;
4401
4402 spin_lock_irqsave(&n->list_lock, flags);
4403
4404 list_for_each_entry(page, &n->partial, lru) {
434e245d 4405 validate_slab_slab(s, page, map);
53e15af0
CL
4406 count++;
4407 }
4408 if (count != n->nr_partial)
f9f58285
FF
4409 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4410 s->name, count, n->nr_partial);
53e15af0
CL
4411
4412 if (!(s->flags & SLAB_STORE_USER))
4413 goto out;
4414
4415 list_for_each_entry(page, &n->full, lru) {
434e245d 4416 validate_slab_slab(s, page, map);
53e15af0
CL
4417 count++;
4418 }
4419 if (count != atomic_long_read(&n->nr_slabs))
f9f58285
FF
4420 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4421 s->name, count, atomic_long_read(&n->nr_slabs));
53e15af0
CL
4422
4423out:
4424 spin_unlock_irqrestore(&n->list_lock, flags);
4425 return count;
4426}
4427
434e245d 4428static long validate_slab_cache(struct kmem_cache *s)
53e15af0
CL
4429{
4430 int node;
4431 unsigned long count = 0;
205ab99d 4432 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
434e245d 4433 sizeof(unsigned long), GFP_KERNEL);
fa45dc25 4434 struct kmem_cache_node *n;
434e245d
CL
4435
4436 if (!map)
4437 return -ENOMEM;
53e15af0
CL
4438
4439 flush_all(s);
fa45dc25 4440 for_each_kmem_cache_node(s, node, n)
434e245d 4441 count += validate_slab_node(s, n, map);
434e245d 4442 kfree(map);
53e15af0
CL
4443 return count;
4444}
88a420e4 4445/*
672bba3a 4446 * Generate lists of code addresses where slabcache objects are allocated
88a420e4
CL
4447 * and freed.
4448 */
4449
4450struct location {
4451 unsigned long count;
ce71e27c 4452 unsigned long addr;
45edfa58
CL
4453 long long sum_time;
4454 long min_time;
4455 long max_time;
4456 long min_pid;
4457 long max_pid;
174596a0 4458 DECLARE_BITMAP(cpus, NR_CPUS);
45edfa58 4459 nodemask_t nodes;
88a420e4
CL
4460};
4461
4462struct loc_track {
4463 unsigned long max;
4464 unsigned long count;
4465 struct location *loc;
4466};
4467
4468static void free_loc_track(struct loc_track *t)
4469{
4470 if (t->max)
4471 free_pages((unsigned long)t->loc,
4472 get_order(sizeof(struct location) * t->max));
4473}
4474
68dff6a9 4475static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
88a420e4
CL
4476{
4477 struct location *l;
4478 int order;
4479
88a420e4
CL
4480 order = get_order(sizeof(struct location) * max);
4481
68dff6a9 4482 l = (void *)__get_free_pages(flags, order);
88a420e4
CL
4483 if (!l)
4484 return 0;
4485
4486 if (t->count) {
4487 memcpy(l, t->loc, sizeof(struct location) * t->count);
4488 free_loc_track(t);
4489 }
4490 t->max = max;
4491 t->loc = l;
4492 return 1;
4493}
4494
4495static int add_location(struct loc_track *t, struct kmem_cache *s,
45edfa58 4496 const struct track *track)
88a420e4
CL
4497{
4498 long start, end, pos;
4499 struct location *l;
ce71e27c 4500 unsigned long caddr;
45edfa58 4501 unsigned long age = jiffies - track->when;
88a420e4
CL
4502
4503 start = -1;
4504 end = t->count;
4505
4506 for ( ; ; ) {
4507 pos = start + (end - start + 1) / 2;
4508
4509 /*
4510 * There is nothing at "end". If we end up there
4511 * we need to insert the new element before "end".
4512 */
4513 if (pos == end)
4514 break;
4515
4516 caddr = t->loc[pos].addr;
45edfa58
CL
4517 if (track->addr == caddr) {
4518
4519 l = &t->loc[pos];
4520 l->count++;
4521 if (track->when) {
4522 l->sum_time += age;
4523 if (age < l->min_time)
4524 l->min_time = age;
4525 if (age > l->max_time)
4526 l->max_time = age;
4527
4528 if (track->pid < l->min_pid)
4529 l->min_pid = track->pid;
4530 if (track->pid > l->max_pid)
4531 l->max_pid = track->pid;
4532
174596a0
RR
4533 cpumask_set_cpu(track->cpu,
4534 to_cpumask(l->cpus));
45edfa58
CL
4535 }
4536 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
4537 return 1;
4538 }
4539
45edfa58 4540 if (track->addr < caddr)
88a420e4
CL
4541 end = pos;
4542 else
4543 start = pos;
4544 }
4545
4546 /*
672bba3a 4547 * Not found. Insert new tracking element.
88a420e4 4548 */
68dff6a9 4549 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
88a420e4
CL
4550 return 0;
4551
4552 l = t->loc + pos;
4553 if (pos < t->count)
4554 memmove(l + 1, l,
4555 (t->count - pos) * sizeof(struct location));
4556 t->count++;
4557 l->count = 1;
45edfa58
CL
4558 l->addr = track->addr;
4559 l->sum_time = age;
4560 l->min_time = age;
4561 l->max_time = age;
4562 l->min_pid = track->pid;
4563 l->max_pid = track->pid;
174596a0
RR
4564 cpumask_clear(to_cpumask(l->cpus));
4565 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
45edfa58
CL
4566 nodes_clear(l->nodes);
4567 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
4568 return 1;
4569}
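/*
 * A standalone sketch of the sorted-insert pattern used by add_location():
 * the same half-open binary search finds the slot, an existing entry is
 * merged, and memmove() makes room for a new one. Growing the array, which
 * the kernel does with alloc_loc_track(), is left out; ex_sorted_insert()
 * is an invented name.
 */
#include <string.h>

/* Insert value into sorted a[0..*nr); returns 0 only when the array is full. */
static int ex_sorted_insert(unsigned long *a, int *nr, int cap,
                            unsigned long value)
{
        int start = -1, end = *nr, pos;

        for (;;) {
                pos = start + (end - start + 1) / 2;
                if (pos == end)
                        break;          /* nothing at "end": insert before it */
                if (a[pos] == value)
                        return 1;       /* already present: merge, nothing to add */
                if (value < a[pos])
                        end = pos;
                else
                        start = pos;
        }

        if (*nr >= cap)
                return 0;
        memmove(a + pos + 1, a + pos, (*nr - pos) * sizeof(*a));
        a[pos] = value;
        (*nr)++;
        return 1;
}

int main(void)
{
        unsigned long a[8] = { 5, 10 };
        int nr = 2;

        ex_sorted_insert(a, &nr, 8, 7);         /* a becomes { 5, 7, 10 } */
        return (nr == 3 && a[1] == 7) ? 0 : 1;
}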
4570
4571static void process_slab(struct loc_track *t, struct kmem_cache *s,
bbd7d57b 4572 struct page *page, enum track_item alloc,
a5dd5c11 4573 unsigned long *map)
88a420e4 4574{
a973e9dd 4575 void *addr = page_address(page);
88a420e4
CL
4576 void *p;
4577
39b26464 4578 bitmap_zero(map, page->objects);
5f80b13a 4579 get_map(s, page, map);
88a420e4 4580
224a88be 4581 for_each_object(p, s, addr, page->objects)
45edfa58
CL
4582 if (!test_bit(slab_index(p, s, addr), map))
4583 add_location(t, s, get_track(s, p, alloc));
88a420e4
CL
4584}
4585
4586static int list_locations(struct kmem_cache *s, char *buf,
4587 enum track_item alloc)
4588{
e374d483 4589 int len = 0;
88a420e4 4590 unsigned long i;
68dff6a9 4591 struct loc_track t = { 0, 0, NULL };
88a420e4 4592 int node;
bbd7d57b
ED
4593 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4594 sizeof(unsigned long), GFP_KERNEL);
fa45dc25 4595 struct kmem_cache_node *n;
88a420e4 4596
bbd7d57b 4597 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
0ee931c4 4598 GFP_KERNEL)) {
bbd7d57b 4599 kfree(map);
68dff6a9 4600 return sprintf(buf, "Out of memory\n");
bbd7d57b 4601 }
88a420e4
CL
4602 /* Push back cpu slabs */
4603 flush_all(s);
4604
fa45dc25 4605 for_each_kmem_cache_node(s, node, n) {
88a420e4
CL
4606 unsigned long flags;
4607 struct page *page;
4608
9e86943b 4609 if (!atomic_long_read(&n->nr_slabs))
88a420e4
CL
4610 continue;
4611
4612 spin_lock_irqsave(&n->list_lock, flags);
4613 list_for_each_entry(page, &n->partial, lru)
bbd7d57b 4614 process_slab(&t, s, page, alloc, map);
88a420e4 4615 list_for_each_entry(page, &n->full, lru)
bbd7d57b 4616 process_slab(&t, s, page, alloc, map);
88a420e4
CL
4617 spin_unlock_irqrestore(&n->list_lock, flags);
4618 }
4619
4620 for (i = 0; i < t.count; i++) {
45edfa58 4621 struct location *l = &t.loc[i];
88a420e4 4622
9c246247 4623 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
88a420e4 4624 break;
e374d483 4625 len += sprintf(buf + len, "%7ld ", l->count);
45edfa58
CL
4626
4627 if (l->addr)
62c70bce 4628 len += sprintf(buf + len, "%pS", (void *)l->addr);
88a420e4 4629 else
e374d483 4630 len += sprintf(buf + len, "<not-available>");
45edfa58
CL
4631
4632 if (l->sum_time != l->min_time) {
e374d483 4633 len += sprintf(buf + len, " age=%ld/%ld/%ld",
f8bd2258
RZ
4634 l->min_time,
4635 (long)div_u64(l->sum_time, l->count),
4636 l->max_time);
45edfa58 4637 } else
e374d483 4638 len += sprintf(buf + len, " age=%ld",
45edfa58
CL
4639 l->min_time);
4640
4641 if (l->min_pid != l->max_pid)
e374d483 4642 len += sprintf(buf + len, " pid=%ld-%ld",
45edfa58
CL
4643 l->min_pid, l->max_pid);
4644 else
e374d483 4645 len += sprintf(buf + len, " pid=%ld",
45edfa58
CL
4646 l->min_pid);
4647
174596a0
RR
4648 if (num_online_cpus() > 1 &&
4649 !cpumask_empty(to_cpumask(l->cpus)) &&
5024c1d7
TH
4650 len < PAGE_SIZE - 60)
4651 len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4652 " cpus=%*pbl",
4653 cpumask_pr_args(to_cpumask(l->cpus)));
45edfa58 4654
62bc62a8 4655 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
5024c1d7
TH
4656 len < PAGE_SIZE - 60)
4657 len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4658 " nodes=%*pbl",
4659 nodemask_pr_args(&l->nodes));
45edfa58 4660
e374d483 4661 len += sprintf(buf + len, "\n");
88a420e4
CL
4662 }
4663
4664 free_loc_track(&t);
bbd7d57b 4665 kfree(map);
88a420e4 4666 if (!t.count)
e374d483
HH
4667 len += sprintf(buf, "No data\n");
4668 return len;
88a420e4 4669}
ab4d5ed5 4670#endif
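/*
 * list_locations() above formats everything into one page-sized buffer and
 * bails out while enough headroom remains. A minimal userspace sketch of
 * that bounded accumulation, assuming a 4 KiB buffer; ex_format_counts()
 * and EX_BUF_SIZE are invented names.
 */
#include <stdio.h>

#define EX_BUF_SIZE     4096    /* stands in for PAGE_SIZE */

static int ex_format_counts(char *buf, const unsigned long *counts, int nr)
{
        int len = 0, i;

        for (i = 0; i < nr; i++) {
                if (len > EX_BUF_SIZE - 100)    /* keep room for one more line */
                        break;
                len += snprintf(buf + len, EX_BUF_SIZE - len,
                                "%7lu hits from site %d\n", counts[i], i);
        }
        return len;
}

int main(void)
{
        static char buf[EX_BUF_SIZE];
        unsigned long counts[] = { 12, 7, 3 };

        ex_format_counts(buf, counts, 3);
        fputs(buf, stdout);
        return 0;
}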
88a420e4 4671
a5a84755 4672#ifdef SLUB_RESILIENCY_TEST
c07b8183 4673static void __init resiliency_test(void)
a5a84755
CL
4674{
4675 u8 *p;
4676
95a05b42 4677 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
a5a84755 4678
f9f58285
FF
4679 pr_err("SLUB resiliency testing\n");
4680 pr_err("-----------------------\n");
4681 pr_err("A. Corruption after allocation\n");
a5a84755
CL
4682
4683 p = kzalloc(16, GFP_KERNEL);
4684 p[16] = 0x12;
f9f58285
FF
4685 pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
4686 p + 16);
a5a84755
CL
4687
4688 validate_slab_cache(kmalloc_caches[4]);
4689
4690 /* Hmmm... The next two are dangerous */
4691 p = kzalloc(32, GFP_KERNEL);
4692 p[32 + sizeof(void *)] = 0x34;
f9f58285
FF
4693 pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
4694 p);
4695 pr_err("If allocated object is overwritten then not detectable\n\n");
a5a84755
CL
4696
4697 validate_slab_cache(kmalloc_caches[5]);
4698 p = kzalloc(64, GFP_KERNEL);
4699 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4700 *p = 0x56;
f9f58285
FF
4701 pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4702 p);
4703 pr_err("If allocated object is overwritten then not detectable\n\n");
a5a84755
CL
4704 validate_slab_cache(kmalloc_caches[6]);
4705
f9f58285 4706 pr_err("\nB. Corruption after free\n");
a5a84755
CL
4707 p = kzalloc(128, GFP_KERNEL);
4708 kfree(p);
4709 *p = 0x78;
f9f58285 4710 pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
a5a84755
CL
4711 validate_slab_cache(kmalloc_caches[7]);
4712
4713 p = kzalloc(256, GFP_KERNEL);
4714 kfree(p);
4715 p[50] = 0x9a;
f9f58285 4716 pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
a5a84755
CL
4717 validate_slab_cache(kmalloc_caches[8]);
4718
4719 p = kzalloc(512, GFP_KERNEL);
4720 kfree(p);
4721 p[512] = 0xab;
f9f58285 4722 pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
a5a84755
CL
4723 validate_slab_cache(kmalloc_caches[9]);
4724}
4725#else
4726#ifdef CONFIG_SYSFS
4727static void resiliency_test(void) {};
4728#endif
4729#endif
4730
ab4d5ed5 4731#ifdef CONFIG_SYSFS
81819f0f 4732enum slab_stat_type {
205ab99d
CL
4733 SL_ALL, /* All slabs */
4734 SL_PARTIAL, /* Only partially allocated slabs */
4735 SL_CPU, /* Only slabs used for cpu caches */
4736 SL_OBJECTS, /* Determine allocated objects not slabs */
4737 SL_TOTAL /* Determine object capacity not slabs */
81819f0f
CL
4738};
4739
205ab99d 4740#define SO_ALL (1 << SL_ALL)
81819f0f
CL
4741#define SO_PARTIAL (1 << SL_PARTIAL)
4742#define SO_CPU (1 << SL_CPU)
4743#define SO_OBJECTS (1 << SL_OBJECTS)
205ab99d 4744#define SO_TOTAL (1 << SL_TOTAL)
81819f0f 4745
1663f26d
TH
4746#ifdef CONFIG_MEMCG
4747static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
4748
4749static int __init setup_slub_memcg_sysfs(char *str)
4750{
4751 int v;
4752
4753 if (get_option(&str, &v) > 0)
4754 memcg_sysfs_enabled = v;
4755
4756 return 1;
4757}
4758
4759__setup("slub_memcg_sysfs=", setup_slub_memcg_sysfs);
4760#endif
4761
62e5c4b4
CG
4762static ssize_t show_slab_objects(struct kmem_cache *s,
4763 char *buf, unsigned long flags)
81819f0f
CL
4764{
4765 unsigned long total = 0;
81819f0f
CL
4766 int node;
4767 int x;
4768 unsigned long *nodes;
81819f0f 4769
e35e1a97 4770 nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
62e5c4b4
CG
4771 if (!nodes)
4772 return -ENOMEM;
81819f0f 4773
205ab99d
CL
4774 if (flags & SO_CPU) {
4775 int cpu;
81819f0f 4776
205ab99d 4777 for_each_possible_cpu(cpu) {
d0e0ac97
CG
4778 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4779 cpu);
ec3ab083 4780 int node;
49e22585 4781 struct page *page;
dfb4f096 4782
4db0c3c2 4783 page = READ_ONCE(c->page);
ec3ab083
CL
4784 if (!page)
4785 continue;
205ab99d 4786
ec3ab083
CL
4787 node = page_to_nid(page);
4788 if (flags & SO_TOTAL)
4789 x = page->objects;
4790 else if (flags & SO_OBJECTS)
4791 x = page->inuse;
4792 else
4793 x = 1;
49e22585 4794
ec3ab083
CL
4795 total += x;
4796 nodes[node] += x;
4797
a93cf07b 4798 page = slub_percpu_partial_read_once(c);
49e22585 4799 if (page) {
8afb1474
LZ
4800 node = page_to_nid(page);
4801 if (flags & SO_TOTAL)
4802 WARN_ON_ONCE(1);
4803 else if (flags & SO_OBJECTS)
4804 WARN_ON_ONCE(1);
4805 else
4806 x = page->pages;
bc6697d8
ED
4807 total += x;
4808 nodes[node] += x;
49e22585 4809 }
81819f0f
CL
4810 }
4811 }
4812
bfc8c901 4813 get_online_mems();
ab4d5ed5 4814#ifdef CONFIG_SLUB_DEBUG
205ab99d 4815 if (flags & SO_ALL) {
fa45dc25
CL
4816 struct kmem_cache_node *n;
4817
4818 for_each_kmem_cache_node(s, node, n) {
205ab99d 4819
d0e0ac97
CG
4820 if (flags & SO_TOTAL)
4821 x = atomic_long_read(&n->total_objects);
4822 else if (flags & SO_OBJECTS)
4823 x = atomic_long_read(&n->total_objects) -
4824 count_partial(n, count_free);
81819f0f 4825 else
205ab99d 4826 x = atomic_long_read(&n->nr_slabs);
81819f0f
CL
4827 total += x;
4828 nodes[node] += x;
4829 }
4830
ab4d5ed5
CL
4831 } else
4832#endif
4833 if (flags & SO_PARTIAL) {
fa45dc25 4834 struct kmem_cache_node *n;
81819f0f 4835
fa45dc25 4836 for_each_kmem_cache_node(s, node, n) {
205ab99d
CL
4837 if (flags & SO_TOTAL)
4838 x = count_partial(n, count_total);
4839 else if (flags & SO_OBJECTS)
4840 x = count_partial(n, count_inuse);
81819f0f 4841 else
205ab99d 4842 x = n->nr_partial;
81819f0f
CL
4843 total += x;
4844 nodes[node] += x;
4845 }
4846 }
81819f0f
CL
4847 x = sprintf(buf, "%lu", total);
4848#ifdef CONFIG_NUMA
fa45dc25 4849 for (node = 0; node < nr_node_ids; node++)
81819f0f
CL
4850 if (nodes[node])
4851 x += sprintf(buf + x, " N%d=%lu",
4852 node, nodes[node]);
4853#endif
bfc8c901 4854 put_online_mems();
81819f0f
CL
4855 kfree(nodes);
4856 return x + sprintf(buf + x, "\n");
4857}
4858
ab4d5ed5 4859#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
4860static int any_slab_objects(struct kmem_cache *s)
4861{
4862 int node;
fa45dc25 4863 struct kmem_cache_node *n;
81819f0f 4864
fa45dc25 4865 for_each_kmem_cache_node(s, node, n)
4ea33e2d 4866 if (atomic_long_read(&n->total_objects))
81819f0f 4867 return 1;
fa45dc25 4868
81819f0f
CL
4869 return 0;
4870}
ab4d5ed5 4871#endif
81819f0f
CL
4872
4873#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
497888cf 4874#define to_slab(n) container_of(n, struct kmem_cache, kobj)
81819f0f
CL
4875
4876struct slab_attribute {
4877 struct attribute attr;
4878 ssize_t (*show)(struct kmem_cache *s, char *buf);
4879 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4880};
4881
4882#define SLAB_ATTR_RO(_name) \
ab067e99
VK
4883 static struct slab_attribute _name##_attr = \
4884 __ATTR(_name, 0400, _name##_show, NULL)
81819f0f
CL
4885
4886#define SLAB_ATTR(_name) \
4887 static struct slab_attribute _name##_attr = \
ab067e99 4888 __ATTR(_name, 0600, _name##_show, _name##_store)
81819f0f 4889
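/*
 * For reference (a sketch, not kernel source): with __ATTR() from
 * <linux/sysfs.h> also expanded, SLAB_ATTR_RO(align) below becomes
 * approximately
 *
 *      static struct slab_attribute align_attr = {
 *              .attr = { .name = "align", .mode = 0400 },
 *              .show = align_show,
 *              .store = NULL,
 *      };
 *
 * so every SLAB_ATTR*() use defines one file under /sys/kernel/slab/<cache>/.
 */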
81819f0f
CL
4890static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4891{
4892 return sprintf(buf, "%d\n", s->size);
4893}
4894SLAB_ATTR_RO(slab_size);
4895
4896static ssize_t align_show(struct kmem_cache *s, char *buf)
4897{
4898 return sprintf(buf, "%d\n", s->align);
4899}
4900SLAB_ATTR_RO(align);
4901
4902static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4903{
3b0efdfa 4904 return sprintf(buf, "%d\n", s->object_size);
81819f0f
CL
4905}
4906SLAB_ATTR_RO(object_size);
4907
4908static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4909{
834f3d11 4910 return sprintf(buf, "%d\n", oo_objects(s->oo));
81819f0f
CL
4911}
4912SLAB_ATTR_RO(objs_per_slab);
4913
06b285dc
CL
4914static ssize_t order_store(struct kmem_cache *s,
4915 const char *buf, size_t length)
4916{
0121c619
CL
4917 unsigned long order;
4918 int err;
4919
3dbb95f7 4920 err = kstrtoul(buf, 10, &order);
0121c619
CL
4921 if (err)
4922 return err;
06b285dc
CL
4923
4924 if (order > slub_max_order || order < slub_min_order)
4925 return -EINVAL;
4926
4927 calculate_sizes(s, order);
4928 return length;
4929}
4930
81819f0f
CL
4931static ssize_t order_show(struct kmem_cache *s, char *buf)
4932{
834f3d11 4933 return sprintf(buf, "%d\n", oo_order(s->oo));
81819f0f 4934}
06b285dc 4935SLAB_ATTR(order);
81819f0f 4936
73d342b1
DR
4937static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4938{
4939 return sprintf(buf, "%lu\n", s->min_partial);
4940}
4941
4942static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4943 size_t length)
4944{
4945 unsigned long min;
4946 int err;
4947
3dbb95f7 4948 err = kstrtoul(buf, 10, &min);
73d342b1
DR
4949 if (err)
4950 return err;
4951
c0bdb232 4952 set_min_partial(s, min);
73d342b1
DR
4953 return length;
4954}
4955SLAB_ATTR(min_partial);
4956
49e22585
CL
4957static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4958{
e6d0e1dc 4959 return sprintf(buf, "%u\n", slub_cpu_partial(s));
49e22585
CL
4960}
4961
4962static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4963 size_t length)
4964{
4965 unsigned long objects;
4966 int err;
4967
3dbb95f7 4968 err = kstrtoul(buf, 10, &objects);
49e22585
CL
4969 if (err)
4970 return err;
345c905d 4971 if (objects && !kmem_cache_has_cpu_partial(s))
74ee4ef1 4972 return -EINVAL;
49e22585 4973
e6d0e1dc 4974 slub_set_cpu_partial(s, objects);
49e22585
CL
4975 flush_all(s);
4976 return length;
4977}
4978SLAB_ATTR(cpu_partial);
4979
81819f0f
CL
4980static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4981{
62c70bce
JP
4982 if (!s->ctor)
4983 return 0;
4984 return sprintf(buf, "%pS\n", s->ctor);
81819f0f
CL
4985}
4986SLAB_ATTR_RO(ctor);
4987
81819f0f
CL
4988static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4989{
4307c14f 4990 return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
81819f0f
CL
4991}
4992SLAB_ATTR_RO(aliases);
4993
81819f0f
CL
4994static ssize_t partial_show(struct kmem_cache *s, char *buf)
4995{
d9acf4b7 4996 return show_slab_objects(s, buf, SO_PARTIAL);
81819f0f
CL
4997}
4998SLAB_ATTR_RO(partial);
4999
5000static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
5001{
d9acf4b7 5002 return show_slab_objects(s, buf, SO_CPU);
81819f0f
CL
5003}
5004SLAB_ATTR_RO(cpu_slabs);
5005
5006static ssize_t objects_show(struct kmem_cache *s, char *buf)
5007{
205ab99d 5008 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
81819f0f
CL
5009}
5010SLAB_ATTR_RO(objects);
5011
205ab99d
CL
5012static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
5013{
5014 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
5015}
5016SLAB_ATTR_RO(objects_partial);
5017
49e22585
CL
5018static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
5019{
5020 int objects = 0;
5021 int pages = 0;
5022 int cpu;
5023 int len;
5024
5025 for_each_online_cpu(cpu) {
a93cf07b
WY
5026 struct page *page;
5027
5028 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
49e22585
CL
5029
5030 if (page) {
5031 pages += page->pages;
5032 objects += page->pobjects;
5033 }
5034 }
5035
5036 len = sprintf(buf, "%d(%d)", objects, pages);
5037
5038#ifdef CONFIG_SMP
5039 for_each_online_cpu(cpu) {
a93cf07b
WY
5040 struct page *page;
5041
5042 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
49e22585
CL
5043
5044 if (page && len < PAGE_SIZE - 20)
5045 len += sprintf(buf + len, " C%d=%d(%d)", cpu,
5046 page->pobjects, page->pages);
5047 }
5048#endif
5049 return len + sprintf(buf + len, "\n");
5050}
5051SLAB_ATTR_RO(slabs_cpu_partial);
5052
a5a84755
CL
5053static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
5054{
5055 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
5056}
5057
5058static ssize_t reclaim_account_store(struct kmem_cache *s,
5059 const char *buf, size_t length)
5060{
5061 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
5062 if (buf[0] == '1')
5063 s->flags |= SLAB_RECLAIM_ACCOUNT;
5064 return length;
5065}
5066SLAB_ATTR(reclaim_account);
5067
5068static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
5069{
5070 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
5071}
5072SLAB_ATTR_RO(hwcache_align);
5073
5074#ifdef CONFIG_ZONE_DMA
5075static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
5076{
5077 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
5078}
5079SLAB_ATTR_RO(cache_dma);
5080#endif
5081
8eb8284b
DW
5082static ssize_t usersize_show(struct kmem_cache *s, char *buf)
5083{
5084 return sprintf(buf, "%zu\n", s->usersize);
5085}
5086SLAB_ATTR_RO(usersize);
5087
a5a84755
CL
5088static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
5089{
5f0d5a3a 5090 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
a5a84755
CL
5091}
5092SLAB_ATTR_RO(destroy_by_rcu);
5093
ab9a0f19
LJ
5094static ssize_t reserved_show(struct kmem_cache *s, char *buf)
5095{
5096 return sprintf(buf, "%d\n", s->reserved);
5097}
5098SLAB_ATTR_RO(reserved);
5099
ab4d5ed5 5100#ifdef CONFIG_SLUB_DEBUG
a5a84755
CL
5101static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5102{
5103 return show_slab_objects(s, buf, SO_ALL);
5104}
5105SLAB_ATTR_RO(slabs);
5106
205ab99d
CL
5107static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5108{
5109 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5110}
5111SLAB_ATTR_RO(total_objects);
5112
81819f0f
CL
5113static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5114{
becfda68 5115 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
81819f0f
CL
5116}
5117
5118static ssize_t sanity_checks_store(struct kmem_cache *s,
5119 const char *buf, size_t length)
5120{
becfda68 5121 s->flags &= ~SLAB_CONSISTENCY_CHECKS;
b789ef51
CL
5122 if (buf[0] == '1') {
5123 s->flags &= ~__CMPXCHG_DOUBLE;
becfda68 5124 s->flags |= SLAB_CONSISTENCY_CHECKS;
b789ef51 5125 }
81819f0f
CL
5126 return length;
5127}
5128SLAB_ATTR(sanity_checks);
5129
5130static ssize_t trace_show(struct kmem_cache *s, char *buf)
5131{
5132 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
5133}
5134
5135static ssize_t trace_store(struct kmem_cache *s, const char *buf,
5136 size_t length)
5137{
c9e16131
CL
5138 /*
5139 * Tracing a merged cache is going to give confusing results
5140 * as well as cause other issues like converting a mergeable
5141 * cache into an unmergeable one.
5142 */
5143 if (s->refcount > 1)
5144 return -EINVAL;
5145
81819f0f 5146 s->flags &= ~SLAB_TRACE;
b789ef51
CL
5147 if (buf[0] == '1') {
5148 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 5149 s->flags |= SLAB_TRACE;
b789ef51 5150 }
81819f0f
CL
5151 return length;
5152}
5153SLAB_ATTR(trace);
5154
81819f0f
CL
5155static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
5156{
5157 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
5158}
5159
5160static ssize_t red_zone_store(struct kmem_cache *s,
5161 const char *buf, size_t length)
5162{
5163 if (any_slab_objects(s))
5164 return -EBUSY;
5165
5166 s->flags &= ~SLAB_RED_ZONE;
b789ef51 5167 if (buf[0] == '1') {
81819f0f 5168 s->flags |= SLAB_RED_ZONE;
b789ef51 5169 }
06b285dc 5170 calculate_sizes(s, -1);
81819f0f
CL
5171 return length;
5172}
5173SLAB_ATTR(red_zone);
5174
5175static ssize_t poison_show(struct kmem_cache *s, char *buf)
5176{
5177 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
5178}
5179
5180static ssize_t poison_store(struct kmem_cache *s,
5181 const char *buf, size_t length)
5182{
5183 if (any_slab_objects(s))
5184 return -EBUSY;
5185
5186 s->flags &= ~SLAB_POISON;
b789ef51 5187 if (buf[0] == '1') {
81819f0f 5188 s->flags |= SLAB_POISON;
b789ef51 5189 }
06b285dc 5190 calculate_sizes(s, -1);
81819f0f
CL
5191 return length;
5192}
5193SLAB_ATTR(poison);
5194
5195static ssize_t store_user_show(struct kmem_cache *s, char *buf)
5196{
5197 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
5198}
5199
5200static ssize_t store_user_store(struct kmem_cache *s,
5201 const char *buf, size_t length)
5202{
5203 if (any_slab_objects(s))
5204 return -EBUSY;
5205
5206 s->flags &= ~SLAB_STORE_USER;
b789ef51
CL
5207 if (buf[0] == '1') {
5208 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 5209 s->flags |= SLAB_STORE_USER;
b789ef51 5210 }
06b285dc 5211 calculate_sizes(s, -1);
81819f0f
CL
5212 return length;
5213}
5214SLAB_ATTR(store_user);

static ssize_t validate_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t validate_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	int ret = -EINVAL;

	if (buf[0] == '1') {
		ret = validate_slab_cache(s);
		if (ret >= 0)
			ret = length;
	}
	return ret;
}
SLAB_ATTR(validate);

static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_ALLOC);
}
SLAB_ATTR_RO(alloc_calls);

static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_FREE);
}
SLAB_ATTR_RO(free_calls);
#endif /* CONFIG_SLUB_DEBUG */

#ifdef CONFIG_FAILSLAB
static ssize_t failslab_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
}

static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
							size_t length)
{
	if (s->refcount > 1)
		return -EINVAL;

	s->flags &= ~SLAB_FAILSLAB;
	if (buf[0] == '1')
		s->flags |= SLAB_FAILSLAB;
	return length;
}
SLAB_ATTR(failslab);
#endif

static ssize_t shrink_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t shrink_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	if (buf[0] == '1')
		kmem_cache_shrink(s);
	else
		return -EINVAL;
	return length;
}
SLAB_ATTR(shrink);
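
/*
 * Illustrative note: writing '1' to /sys/kernel/slab/<cache>/shrink invokes
 * kmem_cache_shrink(), which releases empty slabs back to the page
 * allocator; any other value is rejected with -EINVAL by the store above.
 */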

#ifdef CONFIG_NUMA
static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
}

static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	unsigned long ratio;
	int err;

	err = kstrtoul(buf, 10, &ratio);
	if (err)
		return err;

	if (ratio <= 100)
		s->remote_node_defrag_ratio = ratio * 10;

	return length;
}
SLAB_ATTR(remote_node_defrag_ratio);
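
/*
 * Worked example (illustrative): the ratio is stored internally scaled by
 * 10, so writing 30 to /sys/kernel/slab/<cache>/remote_node_defrag_ratio
 * sets s->remote_node_defrag_ratio to 300, and reading the file shows 30
 * again. Values above 100 are silently ignored by the store above.
 */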
#endif

#ifdef CONFIG_SLUB_STATS
static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
{
	unsigned long sum = 0;
	int cpu;
	int len;
	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	for_each_online_cpu(cpu) {
		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];

		data[cpu] = x;
		sum += x;
	}

	len = sprintf(buf, "%lu", sum);

#ifdef CONFIG_SMP
	for_each_online_cpu(cpu) {
		if (data[cpu] && len < PAGE_SIZE - 20)
			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
	}
#endif
	kfree(data);
	return len + sprintf(buf + len, "\n");
}
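
/*
 * Output format sketch (values are made up): show_stat() prints the total
 * first, then on SMP one " C<cpu>=<count>" entry per online CPU with a
 * non-zero count, so reading a statistic file might yield:
 *
 *	4126537 C0=2057894 C1=2068643
 */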

static void clear_stat(struct kmem_cache *s, enum stat_item si)
{
	int cpu;

	for_each_online_cpu(cpu)
		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
}

#define STAT_ATTR(si, text) 					\
static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
{								\
	return show_stat(s, buf, si);				\
}								\
static ssize_t text##_store(struct kmem_cache *s,		\
				const char *buf, size_t length)	\
{								\
	if (buf[0] != '0')					\
		return -EINVAL;					\
	clear_stat(s, si);					\
	return length;						\
}								\
SLAB_ATTR(text);

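/*
 * Expansion sketch (illustrative): STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath)
 * below generates roughly
 *
 *	static ssize_t alloc_fastpath_show(struct kmem_cache *s, char *buf)
 *	{
 *		return show_stat(s, buf, ALLOC_FASTPATH);
 *	}
 *	static ssize_t alloc_fastpath_store(struct kmem_cache *s,
 *					const char *buf, size_t length)
 *	{
 *		if (buf[0] != '0')
 *			return -EINVAL;
 *		clear_stat(s, ALLOC_FASTPATH);
 *		return length;
 *	}
 *	SLAB_ATTR(alloc_fastpath);
 *
 * i.e. each statistic becomes a sysfs file whose count can be read and
 * reset by writing '0'.
 */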
STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
STAT_ATTR(FREE_FASTPATH, free_fastpath);
STAT_ATTR(FREE_SLOWPATH, free_slowpath);
STAT_ATTR(FREE_FROZEN, free_frozen);
STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
STAT_ATTR(ALLOC_SLAB, alloc_slab);
STAT_ATTR(ALLOC_REFILL, alloc_refill);
STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
STAT_ATTR(FREE_SLAB, free_slab);
STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
STAT_ATTR(ORDER_FALLBACK, order_fallback);
STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
#endif

static struct attribute *slab_attrs[] = {
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr,
	&min_partial_attr.attr,
	&cpu_partial_attr.attr,
	&objects_attr.attr,
	&objects_partial_attr.attr,
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
	&shrink_attr.attr,
	&reserved_attr.attr,
	&slabs_cpu_partial_attr.attr,
#ifdef CONFIG_SLUB_DEBUG
	&total_objects_attr.attr,
	&slabs_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr,
	&validate_attr.attr,
	&alloc_calls_attr.attr,
	&free_calls_attr.attr,
#endif
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
	&remote_node_defrag_ratio_attr.attr,
#endif
#ifdef CONFIG_SLUB_STATS
	&alloc_fastpath_attr.attr,
	&alloc_slowpath_attr.attr,
	&free_fastpath_attr.attr,
	&free_slowpath_attr.attr,
	&free_frozen_attr.attr,
	&free_add_partial_attr.attr,
	&free_remove_partial_attr.attr,
	&alloc_from_partial_attr.attr,
	&alloc_slab_attr.attr,
	&alloc_refill_attr.attr,
	&alloc_node_mismatch_attr.attr,
	&free_slab_attr.attr,
	&cpuslab_flush_attr.attr,
	&deactivate_full_attr.attr,
	&deactivate_empty_attr.attr,
	&deactivate_to_head_attr.attr,
	&deactivate_to_tail_attr.attr,
	&deactivate_remote_frees_attr.attr,
	&deactivate_bypass_attr.attr,
	&order_fallback_attr.attr,
	&cmpxchg_double_fail_attr.attr,
	&cmpxchg_double_cpu_fail_attr.attr,
	&cpu_partial_alloc_attr.attr,
	&cpu_partial_free_attr.attr,
	&cpu_partial_node_attr.attr,
	&cpu_partial_drain_attr.attr,
#endif
#ifdef CONFIG_FAILSLAB
	&failslab_attr.attr,
#endif
	&usersize_attr.attr,

	NULL
};

static const struct attribute_group slab_attr_group = {
	.attrs = slab_attrs,
};

static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	err = attribute->show(s, buf);

	return err;
}

static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	err = attribute->store(s, buf, len);
#ifdef CONFIG_MEMCG
	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
		struct kmem_cache *c;

		mutex_lock(&slab_mutex);
		if (s->max_attr_size < len)
			s->max_attr_size = len;

		/*
		 * This is a best effort propagation, so this function's return
		 * value will be determined by the parent cache only. This is
		 * basically because not all attributes have well-defined
		 * semantics for rollbacks - most of the actions will have
		 * permanent effects.
		 *
		 * Returning the error value of any of the children that fail
		 * is not 100% defined, in the sense that users seeing the
		 * error code won't be able to know anything about the state of
		 * the cache.
		 *
		 * Only returning the error code for the parent cache at least
		 * has well defined semantics. The cache being written to
		 * directly either failed or succeeded, and in either case we
		 * loop through the descendants with best-effort propagation.
		 */
		for_each_memcg_cache(c, s)
			attribute->store(c, buf, len);
		mutex_unlock(&slab_mutex);
	}
#endif
	return err;
}
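
/*
 * Illustrative consequence of the propagation above: with CONFIG_MEMCG,
 * writing to a root cache's attribute (for example its "trace" file) also
 * replays the same store on every per-memcg child cache, but only the root
 * cache's return value is reported back to the writer.
 */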

static void memcg_propagate_slab_attrs(struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG
	int i;
	char *buffer = NULL;
	struct kmem_cache *root_cache;

	if (is_root_cache(s))
		return;

	root_cache = s->memcg_params.root_cache;

	/*
	 * This means the cache had no attribute written. Therefore, no point
	 * in copying default values around.
	 */
	if (!root_cache->max_attr_size)
		return;

	for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
		char mbuf[64];
		char *buf;
		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
		ssize_t len;

		if (!attr || !attr->store || !attr->show)
			continue;

		/*
		 * It is really bad that we have to allocate here, so we will
		 * do it only as a fallback. If we actually allocate, though,
		 * we can just use the allocated buffer until the end.
		 *
		 * Most of the slub attributes will tend to be very small in
		 * size, but sysfs allows buffers up to a page, so larger ones
		 * can theoretically happen.
		 */
		if (buffer)
			buf = buffer;
		else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
			buf = mbuf;
		else {
			buffer = (char *) get_zeroed_page(GFP_KERNEL);
			if (WARN_ON(!buffer))
				continue;
			buf = buffer;
		}

		len = attr->show(root_cache, buf);
		if (len > 0)
			attr->store(s, buf, len);
	}

	if (buffer)
		free_page((unsigned long)buffer);
#endif
}

static void kmem_cache_release(struct kobject *k)
{
	slab_kmem_cache_release(to_slab(k));
}

static const struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
};

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
	.release = kmem_cache_release,
};

static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &slab_ktype)
		return 1;
	return 0;
}

static const struct kset_uevent_ops slab_uevent_ops = {
	.filter = uevent_filter,
};

static struct kset *slab_kset;

static inline struct kset *cache_kset(struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG
	if (!is_root_cache(s))
		return s->memcg_params.root_cache->memcg_kset;
#endif
	return slab_kset;
}

#define ID_STR_LENGTH 64

/* Create a unique string id for a slab cache:
 *
 * Format	:[flags-]size
 */
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

	BUG_ON(!name);

	*p++ = ':';
	/*
	 * First flags affecting slabcache operations. We will only
	 * get here for aliasable slabs so we do not need to support
	 * too many flags. The flags here must cover all flags that
	 * are matched during merging to guarantee that the id is
	 * unique.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
	if (s->flags & SLAB_CONSISTENCY_CHECKS)
		*p++ = 'F';
	if (s->flags & SLAB_ACCOUNT)
		*p++ = 'A';
	if (p != name + 1)
		*p++ = '-';
	p += sprintf(p, "%07d", s->size);

	BUG_ON(p > name + ID_STR_LENGTH - 1);
	return name;
}
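
/*
 * Example ids (illustrative): a 192-byte cache with SLAB_CACHE_DMA and
 * SLAB_RECLAIM_ACCOUNT set gets ":da-0000192"; a cache with none of the
 * flags above gets ":0000192" (no '-' separator, since p == name + 1).
 */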

static void sysfs_slab_remove_workfn(struct work_struct *work)
{
	struct kmem_cache *s =
		container_of(work, struct kmem_cache, kobj_remove_work);

	if (!s->kobj.state_in_sysfs)
		/*
		 * For a memcg cache, this may be called during
		 * deactivation and again on shutdown. Remove only once.
		 * A cache is never shut down before deactivation is
		 * complete, so no need to worry about synchronization.
		 */
		goto out;

#ifdef CONFIG_MEMCG
	kset_unregister(s->memcg_kset);
#endif
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
out:
	kobject_put(&s->kobj);
}

static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	struct kset *kset = cache_kset(s);
	int unmergeable = slab_unmergeable(s);

	INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);

	if (!kset) {
		kobject_init(&s->kobj, &slab_ktype);
		return 0;
	}

	if (!unmergeable && disable_higher_order_debug &&
			(slub_debug & DEBUG_METADATA_FLAGS))
		unmergeable = 1;

	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
	if (err)
		goto out;

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err)
		goto out_del_kobj;

#ifdef CONFIG_MEMCG
	if (is_root_cache(s) && memcg_sysfs_enabled) {
		s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
		if (!s->memcg_kset) {
			err = -ENOMEM;
			goto out_del_kobj;
		}
	}
#endif

	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
	}
out:
	if (!unmergeable)
		kfree(name);
	return err;
out_del_kobj:
	kobject_del(&s->kobj);
	goto out;
}

static void sysfs_slab_remove(struct kmem_cache *s)
{
	if (slab_state < FULL)
		/*
		 * Sysfs has not been setup yet so no need to remove the
		 * cache from sysfs.
		 */
		return;

	kobject_get(&s->kobj);
	schedule_work(&s->kobj_remove_work);
}

void sysfs_slab_release(struct kmem_cache *s)
{
	if (slab_state >= FULL)
		kobject_put(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == FULL) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}

static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	mutex_lock(&slab_mutex);

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		mutex_unlock(&slab_mutex);
		pr_err("Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = FULL;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
			       s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
			       al->name);
		kfree(al);
	}

	mutex_unlock(&slab_mutex);
	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif /* CONFIG_SYSFS */

/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLUB_DEBUG
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
{
	unsigned long nr_slabs = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(s, node, n) {
		nr_slabs += node_nr_slabs(n);
		nr_objs += node_nr_objs(n);
		nr_free += count_partial(n, count_free);
	}

	sinfo->active_objs = nr_objs - nr_free;
	sinfo->num_objs = nr_objs;
	sinfo->active_slabs = nr_slabs;
	sinfo->num_slabs = nr_slabs;
	sinfo->objects_per_slab = oo_objects(s->oo);
	sinfo->cache_order = oo_order(s->oo);
}
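
/*
 * Illustrative mapping: the fields filled in above become the active_objs,
 * num_objs, active_slabs and num_slabs columns of the cache's line in
 * /proc/slabinfo; SLUB reports active_slabs equal to num_slabs since it
 * does not track slab activity separately.
 */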

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
{
}

ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	return -EIO;
}
#endif /* CONFIG_SLUB_DEBUG */