mm/slub.c
b2441318 1// SPDX-License-Identifier: GPL-2.0
81819f0f
CL
2/*
3 * SLUB: A slab allocator that limits cache line use instead of queuing
4 * objects in per cpu and per node lists.
5 *
881db7fb
CL
 6 * The allocator synchronizes using per slab locks or atomic operations
7 * and only uses a centralized lock to manage a pool of partial slabs.
81819f0f 8 *
cde53535 9 * (C) 2007 SGI, Christoph Lameter
881db7fb 10 * (C) 2011 Linux Foundation, Christoph Lameter
81819f0f
CL
11 */
12
13#include <linux/mm.h>
1eb5ac64 14#include <linux/swap.h> /* struct reclaim_state */
81819f0f
CL
15#include <linux/module.h>
16#include <linux/bit_spinlock.h>
17#include <linux/interrupt.h>
18#include <linux/bitops.h>
19#include <linux/slab.h>
97d06609 20#include "slab.h"
7b3c3a50 21#include <linux/proc_fs.h>
81819f0f 22#include <linux/seq_file.h>
a79316c6 23#include <linux/kasan.h>
81819f0f
CL
24#include <linux/cpu.h>
25#include <linux/cpuset.h>
26#include <linux/mempolicy.h>
27#include <linux/ctype.h>
3ac7fe5a 28#include <linux/debugobjects.h>
81819f0f 29#include <linux/kallsyms.h>
b9049e23 30#include <linux/memory.h>
f8bd2258 31#include <linux/math64.h>
773ff60e 32#include <linux/fault-inject.h>
bfa71457 33#include <linux/stacktrace.h>
4de900b4 34#include <linux/prefetch.h>
2633d7a0 35#include <linux/memcontrol.h>
2482ddec 36#include <linux/random.h>
81819f0f 37
4a92379b
RK
38#include <trace/events/kmem.h>
39
072bb0aa
MG
40#include "internal.h"
41
81819f0f
CL
42/*
43 * Lock order:
18004c5d 44 * 1. slab_mutex (Global Mutex)
881db7fb
CL
45 * 2. node->list_lock
46 * 3. slab_lock(page) (Only on some arches and for debugging)
81819f0f 47 *
18004c5d 48 * slab_mutex
881db7fb 49 *
18004c5d 50 * The role of the slab_mutex is to protect the list of all the slabs
881db7fb
CL
51 * and to synchronize major metadata changes to slab cache structures.
52 *
53 * The slab_lock is only used for debugging and on arches that do not
b7ccc7f8 54 * have the ability to do a cmpxchg_double. It only protects:
881db7fb 55 * A. page->freelist -> List of free objects in a page
b7ccc7f8
MW
56 * B. page->inuse -> Number of objects in use
57 * C. page->objects -> Number of objects in page
58 * D. page->frozen -> frozen state
881db7fb
CL
59 *
60 * If a slab is frozen then it is exempt from list management. It is not
61 * on any list. The processor that froze the slab is the one who can
62 * perform list operations on the page. Other processors may put objects
63 * onto the freelist but the processor that froze the slab is the only
64 * one that can retrieve the objects from the page's freelist.
81819f0f
CL
65 *
66 * The list_lock protects the partial and full list on each node and
67 * the partial slab counter. If taken then no new slabs may be added or
 68 * removed from the lists, nor may the number of partial slabs be modified.
69 * (Note that the total number of slabs is an atomic value that may be
70 * modified without taking the list lock).
71 *
72 * The list_lock is a centralized lock and thus we avoid taking it as
73 * much as possible. As long as SLUB does not have to handle partial
74 * slabs, operations can continue without any centralized lock. F.e.
75 * allocating a long series of objects that fill up slabs does not require
76 * the list lock.
81819f0f
CL
77 * Interrupts are disabled during allocation and deallocation in order to
78 * make the slab allocator safe to use in the context of an irq. In addition
79 * interrupts are disabled to ensure that the processor does not change
80 * while handling per_cpu slabs, due to kernel preemption.
81 *
82 * SLUB assigns one slab for allocation to each processor.
83 * Allocations only occur from these slabs called cpu slabs.
84 *
672bba3a
CL
85 * Slabs with free elements are kept on a partial list and during regular
86 * operations no list for full slabs is used. If an object in a full slab is
81819f0f 87 * freed then the slab will show up again on the partial lists.
672bba3a
CL
88 * We track full slabs for debugging purposes though because otherwise we
89 * cannot scan all objects.
81819f0f
CL
90 *
91 * Slabs are freed when they become empty. Teardown and setup is
92 * minimal so we rely on the page allocators per cpu caches for
93 * fast frees and allocs.
94 *
95 * Overloading of page flags that are otherwise used for LRU management.
96 *
4b6f0750
CL
97 * PageActive The slab is frozen and exempt from list processing.
98 * This means that the slab is dedicated to a purpose
99 * such as satisfying allocations for a specific
100 * processor. Objects may be freed in the slab while
101 * it is frozen but slab_free will then skip the usual
102 * list operations. It is up to the processor holding
103 * the slab to integrate the slab into the slab lists
104 * when the slab is no longer needed.
105 *
106 * One use of this flag is to mark slabs that are
107 * used for allocations. Then such a slab becomes a cpu
108 * slab. The cpu slab may be equipped with an additional
dfb4f096 109 * freelist that allows lockless access to
894b8788
CL
110 * free objects in addition to the regular freelist
111 * that requires the slab lock.
81819f0f
CL
112 *
113 * PageError Slab requires special handling due to debug
114 * options set. This moves slab handling out of
894b8788 115 * the fast path and disables lockless freelists.
81819f0f
CL
116 */
117
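/*
 * Illustrative sketch (not compiled, not part of the allocator): the
 * lock nesting described above, outermost first.  get_node(), slab_lock()
 * and friends are the real helpers used in this file; the function
 * itself is hypothetical and only demonstrates the ordering.
 */
#if 0
static void lock_order_sketch(struct kmem_cache *s, struct page *page)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

	mutex_lock(&slab_mutex);	/* 1. global list of slab caches */
	spin_lock(&n->list_lock);	/* 2. per-node partial/full lists */
	slab_lock(page);		/* 3. per-slab lock (debug/fallback only) */

	/* ... manipulate page->freelist / page->counters ... */

	slab_unlock(page);
	spin_unlock(&n->list_lock);
	mutex_unlock(&slab_mutex);
}
#endif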
af537b0a
CL
118static inline int kmem_cache_debug(struct kmem_cache *s)
119{
5577bd8a 120#ifdef CONFIG_SLUB_DEBUG
af537b0a 121 return unlikely(s->flags & SLAB_DEBUG_FLAGS);
5577bd8a 122#else
af537b0a 123 return 0;
5577bd8a 124#endif
af537b0a 125}
5577bd8a 126
117d54df 127void *fixup_red_left(struct kmem_cache *s, void *p)
d86bd1be
JK
128{
129 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
130 p += s->red_left_pad;
131
132 return p;
133}
134
345c905d
JK
135static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
136{
137#ifdef CONFIG_SLUB_CPU_PARTIAL
138 return !kmem_cache_debug(s);
139#else
140 return false;
141#endif
142}
143
81819f0f
CL
144/*
145 * Issues still to be resolved:
146 *
81819f0f
CL
147 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
148 *
81819f0f
CL
149 * - Variable sizing of the per node arrays
150 */
151
152/* Enable to test recovery from slab corruption on boot */
153#undef SLUB_RESILIENCY_TEST
154
b789ef51
CL
155/* Enable to log cmpxchg failures */
156#undef SLUB_DEBUG_CMPXCHG
157
2086d26a
CL
158/*
 159 * Minimum number of partial slabs. These will be left on the partial
160 * lists even if they are empty. kmem_cache_shrink may reclaim them.
161 */
76be8950 162#define MIN_PARTIAL 5
e95eed57 163
2086d26a
CL
164/*
165 * Maximum number of desirable partial slabs.
166 * The existence of more partial slabs makes kmem_cache_shrink
721ae22a 167 * sort the partial list by the number of objects in use.
2086d26a
CL
168 */
169#define MAX_PARTIAL 10
170
becfda68 171#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
81819f0f 172 SLAB_POISON | SLAB_STORE_USER)
672bba3a 173
149daaf3
LA
174/*
175 * These debug flags cannot use CMPXCHG because there might be consistency
176 * issues when checking or reading debug information
177 */
178#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
179 SLAB_TRACE)
180
181
fa5ec8a1 182/*
3de47213
DR
183 * Debugging flags that require metadata to be stored in the slab. These get
184 * disabled when slub_debug=O is used and a cache's min order increases with
185 * metadata.
fa5ec8a1 186 */
3de47213 187#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
fa5ec8a1 188
210b5c06
CG
189#define OO_SHIFT 16
190#define OO_MASK ((1 << OO_SHIFT) - 1)
50d5c41c 191#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
210b5c06 192
81819f0f 193/* Internal SLUB flags */
d50112ed 194/* Poison object */
4fd0b46e 195#define __OBJECT_POISON ((slab_flags_t __force)0x80000000U)
d50112ed 196/* Use cmpxchg_double */
4fd0b46e 197#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U)
81819f0f 198
02cbc874
CL
199/*
200 * Tracking user of a slab.
201 */
d6543e39 202#define TRACK_ADDRS_COUNT 16
02cbc874 203struct track {
ce71e27c 204 unsigned long addr; /* Called from address */
d6543e39
BG
205#ifdef CONFIG_STACKTRACE
206 unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
207#endif
02cbc874
CL
208 int cpu; /* Was running on cpu */
209 int pid; /* Pid context */
210 unsigned long when; /* When did the operation occur */
211};
212
213enum track_item { TRACK_ALLOC, TRACK_FREE };
214
ab4d5ed5 215#ifdef CONFIG_SYSFS
81819f0f
CL
216static int sysfs_slab_add(struct kmem_cache *);
217static int sysfs_slab_alias(struct kmem_cache *, const char *);
107dab5c 218static void memcg_propagate_slab_attrs(struct kmem_cache *s);
bf5eb3de 219static void sysfs_slab_remove(struct kmem_cache *s);
81819f0f 220#else
0c710013
CL
221static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
222static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
223 { return 0; }
107dab5c 224static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
bf5eb3de 225static inline void sysfs_slab_remove(struct kmem_cache *s) { }
81819f0f
CL
226#endif
227
4fdccdfb 228static inline void stat(const struct kmem_cache *s, enum stat_item si)
8ff12cfc
CL
229{
230#ifdef CONFIG_SLUB_STATS
88da03a6
CL
231 /*
232 * The rmw is racy on a preemptible kernel but this is acceptable, so
233 * avoid this_cpu_add()'s irq-disable overhead.
234 */
235 raw_cpu_inc(s->cpu_slab->stat[si]);
8ff12cfc
CL
236#endif
237}
238
81819f0f
CL
239/********************************************************************
240 * Core slab cache functions
241 *******************************************************************/
242
2482ddec
KC
243/*
244 * Returns freelist pointer (ptr). With hardening, this is obfuscated
245 * with an XOR of the address where the pointer is held and a per-cache
246 * random number.
247 */
248static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
249 unsigned long ptr_addr)
250{
251#ifdef CONFIG_SLAB_FREELIST_HARDENED
252 return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
253#else
254 return ptr;
255#endif
256}
257
258/* Returns the freelist pointer recorded at location ptr_addr. */
259static inline void *freelist_dereference(const struct kmem_cache *s,
260 void *ptr_addr)
261{
262 return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
263 (unsigned long)ptr_addr);
264}
265
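/*
 * Worked example (illustrative, values made up): with
 * CONFIG_SLAB_FREELIST_HARDENED the same XOR transform both hides and
 * recovers the pointer, because XOR is its own inverse:
 *
 *	stored = ptr    ^ s->random ^ ptr_addr;		(set_freepointer)
 *	ptr    = stored ^ s->random ^ ptr_addr;		(get_freepointer)
 *
 * An attacker who overwrites the stored value without knowing the
 * per-cache s->random cannot make the next allocation return a chosen
 * address.
 */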
7656c72b
CL
266static inline void *get_freepointer(struct kmem_cache *s, void *object)
267{
2482ddec 268 return freelist_dereference(s, object + s->offset);
7656c72b
CL
269}
270
0ad9500e
ED
271static void prefetch_freepointer(const struct kmem_cache *s, void *object)
272{
0882ff91 273 prefetch(object + s->offset);
0ad9500e
ED
274}
275
1393d9a1
CL
276static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
277{
2482ddec 278 unsigned long freepointer_addr;
1393d9a1
CL
279 void *p;
280
922d566c
JK
281 if (!debug_pagealloc_enabled())
282 return get_freepointer(s, object);
283
2482ddec
KC
284 freepointer_addr = (unsigned long)object + s->offset;
285 probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
286 return freelist_ptr(s, p, freepointer_addr);
1393d9a1
CL
287}
288
7656c72b
CL
289static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
290{
2482ddec
KC
291 unsigned long freeptr_addr = (unsigned long)object + s->offset;
292
ce6fa91b
AP
293#ifdef CONFIG_SLAB_FREELIST_HARDENED
294 BUG_ON(object == fp); /* naive detection of double free or corruption */
295#endif
296
2482ddec 297 *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
7656c72b
CL
298}
299
300/* Loop over all objects in a slab */
224a88be 301#define for_each_object(__p, __s, __addr, __objects) \
d86bd1be
JK
302 for (__p = fixup_red_left(__s, __addr); \
303 __p < (__addr) + (__objects) * (__s)->size; \
304 __p += (__s)->size)
7656c72b 305
54266640 306#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
d86bd1be
JK
307 for (__p = fixup_red_left(__s, __addr), __idx = 1; \
308 __idx <= __objects; \
309 __p += (__s)->size, __idx++)
54266640 310
7656c72b 311/* Determine object index from a given position */
284b50dd 312static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
7656c72b
CL
313{
314 return (p - addr) / s->size;
315}
316
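/*
 * Usage sketch (hypothetical, not called anywhere): walking all objects
 * in a slab page with for_each_object().  The walk starts at
 * fixup_red_left(s, addr) so caches with a left red zone skip it, and
 * advances in s->size steps.
 */
#if 0
static void walk_slab_sketch(struct kmem_cache *s, struct page *page)
{
	void *addr = page_address(page);
	void *p;

	for_each_object(p, s, addr, page->objects)
		pr_info("object %u at %p\n", slab_index(p, s, addr), p);
}
#endif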
9736d2a9 317static inline unsigned int order_objects(unsigned int order, unsigned int size)
ab9a0f19 318{
9736d2a9 319 return ((unsigned int)PAGE_SIZE << order) / size;
ab9a0f19
LJ
320}
321
19af27af 322static inline struct kmem_cache_order_objects oo_make(unsigned int order,
9736d2a9 323 unsigned int size)
834f3d11
CL
324{
325 struct kmem_cache_order_objects x = {
9736d2a9 326 (order << OO_SHIFT) + order_objects(order, size)
834f3d11
CL
327 };
328
329 return x;
330}
331
19af27af 332static inline unsigned int oo_order(struct kmem_cache_order_objects x)
834f3d11 333{
210b5c06 334 return x.x >> OO_SHIFT;
834f3d11
CL
335}
336
19af27af 337static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
834f3d11 338{
210b5c06 339 return x.x & OO_MASK;
834f3d11
CL
340}
341
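/*
 * Worked example (illustrative): oo_make() packs the page order into the
 * high bits and the object count into the low OO_SHIFT bits of a single
 * unsigned int.  For a hypothetical 64-byte object cache on an order-1
 * (8 KiB, PAGE_SIZE == 4096) slab:
 *
 *	order_objects(1, 64)	= (4096 << 1) / 64	= 128
 *	x.x			= (1 << 16) + 128	= 0x10080
 *	oo_order(x)		= 0x10080 >> 16		= 1
 *	oo_objects(x)		= 0x10080 & 0xffff	= 128
 */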
881db7fb
CL
342/*
343 * Per slab locking using the pagelock
344 */
345static __always_inline void slab_lock(struct page *page)
346{
48c935ad 347 VM_BUG_ON_PAGE(PageTail(page), page);
881db7fb
CL
348 bit_spin_lock(PG_locked, &page->flags);
349}
350
351static __always_inline void slab_unlock(struct page *page)
352{
48c935ad 353 VM_BUG_ON_PAGE(PageTail(page), page);
881db7fb
CL
354 __bit_spin_unlock(PG_locked, &page->flags);
355}
356
1d07171c
CL
357/* Interrupts must be disabled (for the fallback code to work right) */
358static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
359 void *freelist_old, unsigned long counters_old,
360 void *freelist_new, unsigned long counters_new,
361 const char *n)
362{
363 VM_BUG_ON(!irqs_disabled());
2565409f
HC
364#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
365 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
1d07171c 366 if (s->flags & __CMPXCHG_DOUBLE) {
cdcd6298 367 if (cmpxchg_double(&page->freelist, &page->counters,
0aa9a13d
DC
368 freelist_old, counters_old,
369 freelist_new, counters_new))
6f6528a1 370 return true;
1d07171c
CL
371 } else
372#endif
373 {
374 slab_lock(page);
d0e0ac97
CG
375 if (page->freelist == freelist_old &&
376 page->counters == counters_old) {
1d07171c 377 page->freelist = freelist_new;
7d27a04b 378 page->counters = counters_new;
1d07171c 379 slab_unlock(page);
6f6528a1 380 return true;
1d07171c
CL
381 }
382 slab_unlock(page);
383 }
384
385 cpu_relax();
386 stat(s, CMPXCHG_DOUBLE_FAIL);
387
388#ifdef SLUB_DEBUG_CMPXCHG
f9f58285 389 pr_info("%s %s: cmpxchg double redo ", n, s->name);
1d07171c
CL
390#endif
391
6f6528a1 392 return false;
1d07171c
CL
393}
394
b789ef51
CL
395static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
396 void *freelist_old, unsigned long counters_old,
397 void *freelist_new, unsigned long counters_new,
398 const char *n)
399{
2565409f
HC
400#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
401 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
b789ef51 402 if (s->flags & __CMPXCHG_DOUBLE) {
cdcd6298 403 if (cmpxchg_double(&page->freelist, &page->counters,
0aa9a13d
DC
404 freelist_old, counters_old,
405 freelist_new, counters_new))
6f6528a1 406 return true;
b789ef51
CL
407 } else
408#endif
409 {
1d07171c
CL
410 unsigned long flags;
411
412 local_irq_save(flags);
881db7fb 413 slab_lock(page);
d0e0ac97
CG
414 if (page->freelist == freelist_old &&
415 page->counters == counters_old) {
b789ef51 416 page->freelist = freelist_new;
7d27a04b 417 page->counters = counters_new;
881db7fb 418 slab_unlock(page);
1d07171c 419 local_irq_restore(flags);
6f6528a1 420 return true;
b789ef51 421 }
881db7fb 422 slab_unlock(page);
1d07171c 423 local_irq_restore(flags);
b789ef51
CL
424 }
425
426 cpu_relax();
427 stat(s, CMPXCHG_DOUBLE_FAIL);
428
429#ifdef SLUB_DEBUG_CMPXCHG
f9f58285 430 pr_info("%s %s: cmpxchg double redo ", n, s->name);
b789ef51
CL
431#endif
432
6f6528a1 433 return false;
b789ef51
CL
434}
435
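/*
 * Usage sketch (hypothetical): the typical retry pattern built on top of
 * cmpxchg_double_slab().  freelist and counters are re-read on every
 * iteration because another CPU may have changed the page in between.
 */
#if 0
	void *old_freelist;
	unsigned long old_counters;
	struct page new;

	do {
		old_freelist = page->freelist;
		old_counters = page->counters;
		new.counters = old_counters;
		new.freelist = old_freelist;
		/* ... adjust new.inuse / new.freelist for the operation ... */
	} while (!cmpxchg_double_slab(s, page,
			old_freelist, old_counters,
			new.freelist, new.counters,
			"usage sketch"));
#endif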
41ecc55b 436#ifdef CONFIG_SLUB_DEBUG
5f80b13a
CL
437/*
 438 * Determine a map of objects in use on a page.
439 *
881db7fb 440 * Node listlock must be held to guarantee that the page does
5f80b13a
CL
441 * not vanish from under us.
442 */
443static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
444{
445 void *p;
446 void *addr = page_address(page);
447
448 for (p = page->freelist; p; p = get_freepointer(s, p))
449 set_bit(slab_index(p, s, addr), map);
450}
451
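/*
 * Usage sketch (hypothetical): get_map() sets a bit for every object
 * that is currently on the page's freelist, so a clear bit means the
 * object is in use.  A caller typically zeroes a bitmap of
 * page->objects bits first and then walks the slab:
 */
#if 0
	unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
	void *addr = page_address(page);
	void *p;

	get_map(s, page, map);
	for_each_object(p, s, addr, page->objects)
		if (!test_bit(slab_index(p, s, addr), map))
			pr_info("allocated object at %p\n", p);
	bitmap_free(map);
#endif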
870b1fbb 452static inline unsigned int size_from_object(struct kmem_cache *s)
d86bd1be
JK
453{
454 if (s->flags & SLAB_RED_ZONE)
455 return s->size - s->red_left_pad;
456
457 return s->size;
458}
459
460static inline void *restore_red_left(struct kmem_cache *s, void *p)
461{
462 if (s->flags & SLAB_RED_ZONE)
463 p -= s->red_left_pad;
464
465 return p;
466}
467
41ecc55b
CL
468/*
469 * Debug settings:
470 */
89d3c87e 471#if defined(CONFIG_SLUB_DEBUG_ON)
d50112ed 472static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
f0630fff 473#else
d50112ed 474static slab_flags_t slub_debug;
f0630fff 475#endif
41ecc55b
CL
476
477static char *slub_debug_slabs;
fa5ec8a1 478static int disable_higher_order_debug;
41ecc55b 479
a79316c6
AR
480/*
481 * slub is about to manipulate internal object metadata. This memory lies
482 * outside the range of the allocated object, so accessing it would normally
483 * be reported by kasan as a bounds error. metadata_access_enable() is used
484 * to tell kasan that these accesses are OK.
485 */
486static inline void metadata_access_enable(void)
487{
488 kasan_disable_current();
489}
490
491static inline void metadata_access_disable(void)
492{
493 kasan_enable_current();
494}
495
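/*
 * Usage sketch: every direct read of red zone or padding bytes in the
 * checks below is bracketed by these helpers so that KASAN does not
 * report the intentional out-of-object access, e.g.:
 *
 *	metadata_access_enable();
 *	fault = memchr_inv(start, value, bytes);
 *	metadata_access_disable();
 */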
81819f0f
CL
496/*
497 * Object debugging
498 */
d86bd1be
JK
499
500/* Verify that a pointer has an address that is valid within a slab page */
501static inline int check_valid_pointer(struct kmem_cache *s,
502 struct page *page, void *object)
503{
504 void *base;
505
506 if (!object)
507 return 1;
508
509 base = page_address(page);
510 object = restore_red_left(s, object);
511 if (object < base || object >= base + page->objects * s->size ||
512 (object - base) % s->size) {
513 return 0;
514 }
515
516 return 1;
517}
518
aa2efd5e
DT
519static void print_section(char *level, char *text, u8 *addr,
520 unsigned int length)
81819f0f 521{
a79316c6 522 metadata_access_enable();
aa2efd5e 523 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
ffc79d28 524 length, 1);
a79316c6 525 metadata_access_disable();
81819f0f
CL
526}
527
81819f0f
CL
528static struct track *get_track(struct kmem_cache *s, void *object,
529 enum track_item alloc)
530{
531 struct track *p;
532
533 if (s->offset)
534 p = object + s->offset + sizeof(void *);
535 else
536 p = object + s->inuse;
537
538 return p + alloc;
539}
540
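/*
 * Layout note (illustrative): with SLAB_STORE_USER two struct track
 * records sit directly behind the object metadata, TRACK_ALLOC first,
 * then TRACK_FREE.  The base is p + s->offset + sizeof(void *) when the
 * free pointer is stored outside the object, or p + s->inuse otherwise,
 * which is exactly what get_track() computes above.
 */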
541static void set_track(struct kmem_cache *s, void *object,
ce71e27c 542 enum track_item alloc, unsigned long addr)
81819f0f 543{
1a00df4a 544 struct track *p = get_track(s, object, alloc);
81819f0f 545
81819f0f 546 if (addr) {
d6543e39
BG
547#ifdef CONFIG_STACKTRACE
548 struct stack_trace trace;
549 int i;
550
551 trace.nr_entries = 0;
552 trace.max_entries = TRACK_ADDRS_COUNT;
553 trace.entries = p->addrs;
554 trace.skip = 3;
a79316c6 555 metadata_access_enable();
d6543e39 556 save_stack_trace(&trace);
a79316c6 557 metadata_access_disable();
d6543e39
BG
558
559 /* See rant in lockdep.c */
560 if (trace.nr_entries != 0 &&
561 trace.entries[trace.nr_entries - 1] == ULONG_MAX)
562 trace.nr_entries--;
563
564 for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
565 p->addrs[i] = 0;
566#endif
81819f0f
CL
567 p->addr = addr;
568 p->cpu = smp_processor_id();
88e4ccf2 569 p->pid = current->pid;
81819f0f
CL
570 p->when = jiffies;
571 } else
572 memset(p, 0, sizeof(struct track));
573}
574
81819f0f
CL
575static void init_tracking(struct kmem_cache *s, void *object)
576{
24922684
CL
577 if (!(s->flags & SLAB_STORE_USER))
578 return;
579
ce71e27c
EGM
580 set_track(s, object, TRACK_FREE, 0UL);
581 set_track(s, object, TRACK_ALLOC, 0UL);
81819f0f
CL
582}
583
86609d33 584static void print_track(const char *s, struct track *t, unsigned long pr_time)
81819f0f
CL
585{
586 if (!t->addr)
587 return;
588
f9f58285 589 pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
86609d33 590 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
d6543e39
BG
591#ifdef CONFIG_STACKTRACE
592 {
593 int i;
594 for (i = 0; i < TRACK_ADDRS_COUNT; i++)
595 if (t->addrs[i])
f9f58285 596 pr_err("\t%pS\n", (void *)t->addrs[i]);
d6543e39
BG
597 else
598 break;
599 }
600#endif
24922684
CL
601}
602
603static void print_tracking(struct kmem_cache *s, void *object)
604{
86609d33 605 unsigned long pr_time = jiffies;
24922684
CL
606 if (!(s->flags & SLAB_STORE_USER))
607 return;
608
86609d33
CP
609 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
610 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
24922684
CL
611}
612
613static void print_page_info(struct page *page)
614{
f9f58285 615 pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
d0e0ac97 616 page, page->objects, page->inuse, page->freelist, page->flags);
24922684
CL
617
618}
619
620static void slab_bug(struct kmem_cache *s, char *fmt, ...)
621{
ecc42fbe 622 struct va_format vaf;
24922684 623 va_list args;
24922684
CL
624
625 va_start(args, fmt);
ecc42fbe
FF
626 vaf.fmt = fmt;
627 vaf.va = &args;
f9f58285 628 pr_err("=============================================================================\n");
ecc42fbe 629 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
f9f58285 630 pr_err("-----------------------------------------------------------------------------\n\n");
645df230 631
373d4d09 632 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
ecc42fbe 633 va_end(args);
81819f0f
CL
634}
635
24922684
CL
636static void slab_fix(struct kmem_cache *s, char *fmt, ...)
637{
ecc42fbe 638 struct va_format vaf;
24922684 639 va_list args;
24922684
CL
640
641 va_start(args, fmt);
ecc42fbe
FF
642 vaf.fmt = fmt;
643 vaf.va = &args;
644 pr_err("FIX %s: %pV\n", s->name, &vaf);
24922684 645 va_end(args);
24922684
CL
646}
647
648static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
81819f0f
CL
649{
650 unsigned int off; /* Offset of last byte */
a973e9dd 651 u8 *addr = page_address(page);
24922684
CL
652
653 print_tracking(s, p);
654
655 print_page_info(page);
656
f9f58285
FF
657 pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
658 p, p - addr, get_freepointer(s, p));
24922684 659
d86bd1be 660 if (s->flags & SLAB_RED_ZONE)
aa2efd5e
DT
661 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
662 s->red_left_pad);
d86bd1be 663 else if (p > addr + 16)
aa2efd5e 664 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
81819f0f 665
aa2efd5e 666 print_section(KERN_ERR, "Object ", p,
1b473f29 667 min_t(unsigned int, s->object_size, PAGE_SIZE));
81819f0f 668 if (s->flags & SLAB_RED_ZONE)
aa2efd5e 669 print_section(KERN_ERR, "Redzone ", p + s->object_size,
3b0efdfa 670 s->inuse - s->object_size);
81819f0f 671
81819f0f
CL
672 if (s->offset)
673 off = s->offset + sizeof(void *);
674 else
675 off = s->inuse;
676
24922684 677 if (s->flags & SLAB_STORE_USER)
81819f0f 678 off += 2 * sizeof(struct track);
81819f0f 679
80a9201a
AP
680 off += kasan_metadata_size(s);
681
d86bd1be 682 if (off != size_from_object(s))
81819f0f 683 /* Beginning of the filler is the free pointer */
aa2efd5e
DT
684 print_section(KERN_ERR, "Padding ", p + off,
685 size_from_object(s) - off);
24922684
CL
686
687 dump_stack();
81819f0f
CL
688}
689
75c66def 690void object_err(struct kmem_cache *s, struct page *page,
81819f0f
CL
691 u8 *object, char *reason)
692{
3dc50637 693 slab_bug(s, "%s", reason);
24922684 694 print_trailer(s, page, object);
81819f0f
CL
695}
696
a38965bf 697static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
d0e0ac97 698 const char *fmt, ...)
81819f0f
CL
699{
700 va_list args;
701 char buf[100];
702
24922684
CL
703 va_start(args, fmt);
704 vsnprintf(buf, sizeof(buf), fmt, args);
81819f0f 705 va_end(args);
3dc50637 706 slab_bug(s, "%s", buf);
24922684 707 print_page_info(page);
81819f0f
CL
708 dump_stack();
709}
710
f7cb1933 711static void init_object(struct kmem_cache *s, void *object, u8 val)
81819f0f
CL
712{
713 u8 *p = object;
714
d86bd1be
JK
715 if (s->flags & SLAB_RED_ZONE)
716 memset(p - s->red_left_pad, val, s->red_left_pad);
717
81819f0f 718 if (s->flags & __OBJECT_POISON) {
3b0efdfa
CL
719 memset(p, POISON_FREE, s->object_size - 1);
720 p[s->object_size - 1] = POISON_END;
81819f0f
CL
721 }
722
723 if (s->flags & SLAB_RED_ZONE)
3b0efdfa 724 memset(p + s->object_size, val, s->inuse - s->object_size);
81819f0f
CL
725}
726
24922684
CL
727static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
728 void *from, void *to)
729{
730 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
731 memset(from, data, to - from);
732}
733
734static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
735 u8 *object, char *what,
06428780 736 u8 *start, unsigned int value, unsigned int bytes)
24922684
CL
737{
738 u8 *fault;
739 u8 *end;
740
a79316c6 741 metadata_access_enable();
79824820 742 fault = memchr_inv(start, value, bytes);
a79316c6 743 metadata_access_disable();
24922684
CL
744 if (!fault)
745 return 1;
746
747 end = start + bytes;
748 while (end > fault && end[-1] == value)
749 end--;
750
751 slab_bug(s, "%s overwritten", what);
f9f58285 752 pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
24922684
CL
753 fault, end - 1, fault[0], value);
754 print_trailer(s, page, object);
755
756 restore_bytes(s, what, value, fault, end);
757 return 0;
81819f0f
CL
758}
759
81819f0f
CL
760/*
761 * Object layout:
762 *
763 * object address
764 * Bytes of the object to be managed.
765 * If the freepointer may overlay the object then the free
766 * pointer is the first word of the object.
672bba3a 767 *
81819f0f
CL
768 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
769 * 0xa5 (POISON_END)
770 *
3b0efdfa 771 * object + s->object_size
81819f0f 772 * Padding to reach word boundary. This is also used for Redzoning.
672bba3a 773 * Padding is extended by another word if Redzoning is enabled and
3b0efdfa 774 * object_size == inuse.
672bba3a 775 *
81819f0f
CL
776 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
777 * 0xcc (RED_ACTIVE) for objects in use.
778 *
779 * object + s->inuse
672bba3a
CL
780 * Meta data starts here.
781 *
81819f0f
CL
782 * A. Free pointer (if we cannot overwrite object on free)
783 * B. Tracking data for SLAB_STORE_USER
672bba3a 784 * C. Padding to reach required alignment boundary or at minimum
6446faa2 785 * one word if debugging is on to be able to detect writes
672bba3a
CL
786 * before the word boundary.
787 *
788 * Padding is done using 0x5a (POISON_INUSE)
81819f0f
CL
789 *
790 * object + s->size
672bba3a 791 * Nothing is used beyond s->size.
81819f0f 792 *
3b0efdfa 793 * If slabcaches are merged then the object_size and inuse boundaries are mostly
672bba3a 794 * ignored. And therefore no slab options that rely on these boundaries
81819f0f
CL
795 * may be used with merged slabcaches.
796 */
797
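/*
 * Worked example (illustrative, exact offsets depend on calculate_sizes()):
 * an object in a cache with SLAB_RED_ZONE and SLAB_STORE_USER set is
 * laid out roughly as
 *
 *	[ left red zone (red_left_pad)   ]
 *	[ object data (object_size)      ]
 *	[ right red zone up to s->inuse  ]
 *	[ free pointer (sizeof(void *))  ]
 *	[ 2 x struct track               ]
 *	[ POISON_INUSE padding to s->size ]
 *
 * which is the ordering described in the comment above; only the exact
 * offsets differ per cache.
 */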
81819f0f
CL
798static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
799{
800 unsigned long off = s->inuse; /* The end of info */
801
802 if (s->offset)
803 /* Freepointer is placed after the object. */
804 off += sizeof(void *);
805
806 if (s->flags & SLAB_STORE_USER)
807 /* We also have user information there */
808 off += 2 * sizeof(struct track);
809
80a9201a
AP
810 off += kasan_metadata_size(s);
811
d86bd1be 812 if (size_from_object(s) == off)
81819f0f
CL
813 return 1;
814
24922684 815 return check_bytes_and_report(s, page, p, "Object padding",
d86bd1be 816 p + off, POISON_INUSE, size_from_object(s) - off);
81819f0f
CL
817}
818
39b26464 819/* Check the pad bytes at the end of a slab page */
81819f0f
CL
820static int slab_pad_check(struct kmem_cache *s, struct page *page)
821{
24922684
CL
822 u8 *start;
823 u8 *fault;
824 u8 *end;
5d682681 825 u8 *pad;
24922684
CL
826 int length;
827 int remainder;
81819f0f
CL
828
829 if (!(s->flags & SLAB_POISON))
830 return 1;
831
a973e9dd 832 start = page_address(page);
9736d2a9 833 length = PAGE_SIZE << compound_order(page);
39b26464
CL
834 end = start + length;
835 remainder = length % s->size;
81819f0f
CL
836 if (!remainder)
837 return 1;
838
5d682681 839 pad = end - remainder;
a79316c6 840 metadata_access_enable();
5d682681 841 fault = memchr_inv(pad, POISON_INUSE, remainder);
a79316c6 842 metadata_access_disable();
24922684
CL
843 if (!fault)
844 return 1;
845 while (end > fault && end[-1] == POISON_INUSE)
846 end--;
847
848 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
5d682681 849 print_section(KERN_ERR, "Padding ", pad, remainder);
24922684 850
5d682681 851 restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
24922684 852 return 0;
81819f0f
CL
853}
854
855static int check_object(struct kmem_cache *s, struct page *page,
f7cb1933 856 void *object, u8 val)
81819f0f
CL
857{
858 u8 *p = object;
3b0efdfa 859 u8 *endobject = object + s->object_size;
81819f0f
CL
860
861 if (s->flags & SLAB_RED_ZONE) {
d86bd1be
JK
862 if (!check_bytes_and_report(s, page, object, "Redzone",
863 object - s->red_left_pad, val, s->red_left_pad))
864 return 0;
865
24922684 866 if (!check_bytes_and_report(s, page, object, "Redzone",
3b0efdfa 867 endobject, val, s->inuse - s->object_size))
81819f0f 868 return 0;
81819f0f 869 } else {
3b0efdfa 870 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
3adbefee 871 check_bytes_and_report(s, page, p, "Alignment padding",
d0e0ac97
CG
872 endobject, POISON_INUSE,
873 s->inuse - s->object_size);
3adbefee 874 }
81819f0f
CL
875 }
876
877 if (s->flags & SLAB_POISON) {
f7cb1933 878 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
24922684 879 (!check_bytes_and_report(s, page, p, "Poison", p,
3b0efdfa 880 POISON_FREE, s->object_size - 1) ||
24922684 881 !check_bytes_and_report(s, page, p, "Poison",
3b0efdfa 882 p + s->object_size - 1, POISON_END, 1)))
81819f0f 883 return 0;
81819f0f
CL
884 /*
885 * check_pad_bytes cleans up on its own.
886 */
887 check_pad_bytes(s, page, p);
888 }
889
f7cb1933 890 if (!s->offset && val == SLUB_RED_ACTIVE)
81819f0f
CL
891 /*
892 * Object and freepointer overlap. Cannot check
893 * freepointer while object is allocated.
894 */
895 return 1;
896
897 /* Check free pointer validity */
898 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
899 object_err(s, page, p, "Freepointer corrupt");
900 /*
9f6c708e 901 * No choice but to zap it and thus lose the remainder
81819f0f 902 * of the free objects in this slab. May cause
672bba3a 903 * another error because the object count is now wrong.
81819f0f 904 */
a973e9dd 905 set_freepointer(s, p, NULL);
81819f0f
CL
906 return 0;
907 }
908 return 1;
909}
910
911static int check_slab(struct kmem_cache *s, struct page *page)
912{
39b26464
CL
913 int maxobj;
914
81819f0f
CL
915 VM_BUG_ON(!irqs_disabled());
916
917 if (!PageSlab(page)) {
24922684 918 slab_err(s, page, "Not a valid slab page");
81819f0f
CL
919 return 0;
920 }
39b26464 921
9736d2a9 922 maxobj = order_objects(compound_order(page), s->size);
39b26464
CL
923 if (page->objects > maxobj) {
924 slab_err(s, page, "objects %u > max %u",
f6edde9c 925 page->objects, maxobj);
39b26464
CL
926 return 0;
927 }
928 if (page->inuse > page->objects) {
24922684 929 slab_err(s, page, "inuse %u > max %u",
f6edde9c 930 page->inuse, page->objects);
81819f0f
CL
931 return 0;
932 }
933 /* Slab_pad_check fixes things up after itself */
934 slab_pad_check(s, page);
935 return 1;
936}
937
938/*
672bba3a
CL
939 * Determine if a certain object on a page is on the freelist. Must hold the
940 * slab lock to guarantee that the chains are in a consistent state.
81819f0f
CL
941 */
942static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
943{
944 int nr = 0;
881db7fb 945 void *fp;
81819f0f 946 void *object = NULL;
f6edde9c 947 int max_objects;
81819f0f 948
881db7fb 949 fp = page->freelist;
39b26464 950 while (fp && nr <= page->objects) {
81819f0f
CL
951 if (fp == search)
952 return 1;
953 if (!check_valid_pointer(s, page, fp)) {
954 if (object) {
955 object_err(s, page, object,
956 "Freechain corrupt");
a973e9dd 957 set_freepointer(s, object, NULL);
81819f0f 958 } else {
24922684 959 slab_err(s, page, "Freepointer corrupt");
a973e9dd 960 page->freelist = NULL;
39b26464 961 page->inuse = page->objects;
24922684 962 slab_fix(s, "Freelist cleared");
81819f0f
CL
963 return 0;
964 }
965 break;
966 }
967 object = fp;
968 fp = get_freepointer(s, object);
969 nr++;
970 }
971
9736d2a9 972 max_objects = order_objects(compound_order(page), s->size);
210b5c06
CG
973 if (max_objects > MAX_OBJS_PER_PAGE)
974 max_objects = MAX_OBJS_PER_PAGE;
224a88be
CL
975
976 if (page->objects != max_objects) {
756a025f
JP
977 slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
978 page->objects, max_objects);
224a88be
CL
979 page->objects = max_objects;
980 slab_fix(s, "Number of objects adjusted.");
981 }
39b26464 982 if (page->inuse != page->objects - nr) {
756a025f
JP
983 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
984 page->inuse, page->objects - nr);
39b26464 985 page->inuse = page->objects - nr;
24922684 986 slab_fix(s, "Object count adjusted.");
81819f0f
CL
987 }
988 return search == NULL;
989}
990
0121c619
CL
991static void trace(struct kmem_cache *s, struct page *page, void *object,
992 int alloc)
3ec09742
CL
993{
994 if (s->flags & SLAB_TRACE) {
f9f58285 995 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
3ec09742
CL
996 s->name,
997 alloc ? "alloc" : "free",
998 object, page->inuse,
999 page->freelist);
1000
1001 if (!alloc)
aa2efd5e 1002 print_section(KERN_INFO, "Object ", (void *)object,
d0e0ac97 1003 s->object_size);
3ec09742
CL
1004
1005 dump_stack();
1006 }
1007}
1008
643b1138 1009/*
672bba3a 1010 * Tracking of fully allocated slabs for debugging purposes.
643b1138 1011 */
5cc6eee8
CL
1012static void add_full(struct kmem_cache *s,
1013 struct kmem_cache_node *n, struct page *page)
643b1138 1014{
5cc6eee8
CL
1015 if (!(s->flags & SLAB_STORE_USER))
1016 return;
1017
255d0884 1018 lockdep_assert_held(&n->list_lock);
643b1138 1019 list_add(&page->lru, &n->full);
643b1138
CL
1020}
1021
c65c1877 1022static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
643b1138 1023{
643b1138
CL
1024 if (!(s->flags & SLAB_STORE_USER))
1025 return;
1026
255d0884 1027 lockdep_assert_held(&n->list_lock);
643b1138 1028 list_del(&page->lru);
643b1138
CL
1029}
1030
0f389ec6
CL
1031/* Tracking of the number of slabs for debugging purposes */
1032static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1033{
1034 struct kmem_cache_node *n = get_node(s, node);
1035
1036 return atomic_long_read(&n->nr_slabs);
1037}
1038
26c02cf0
AB
1039static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1040{
1041 return atomic_long_read(&n->nr_slabs);
1042}
1043
205ab99d 1044static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
0f389ec6
CL
1045{
1046 struct kmem_cache_node *n = get_node(s, node);
1047
1048 /*
1049 * May be called early in order to allocate a slab for the
1050 * kmem_cache_node structure. Solve the chicken-egg
1051 * dilemma by deferring the increment of the count during
1052 * bootstrap (see early_kmem_cache_node_alloc).
1053 */
338b2642 1054 if (likely(n)) {
0f389ec6 1055 atomic_long_inc(&n->nr_slabs);
205ab99d
CL
1056 atomic_long_add(objects, &n->total_objects);
1057 }
0f389ec6 1058}
205ab99d 1059static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
0f389ec6
CL
1060{
1061 struct kmem_cache_node *n = get_node(s, node);
1062
1063 atomic_long_dec(&n->nr_slabs);
205ab99d 1064 atomic_long_sub(objects, &n->total_objects);
0f389ec6
CL
1065}
1066
1067/* Object debug checks for alloc/free paths */
3ec09742
CL
1068static void setup_object_debug(struct kmem_cache *s, struct page *page,
1069 void *object)
1070{
1071 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1072 return;
1073
f7cb1933 1074 init_object(s, object, SLUB_RED_INACTIVE);
3ec09742
CL
1075 init_tracking(s, object);
1076}
1077
becfda68 1078static inline int alloc_consistency_checks(struct kmem_cache *s,
d0e0ac97 1079 struct page *page,
ce71e27c 1080 void *object, unsigned long addr)
81819f0f
CL
1081{
1082 if (!check_slab(s, page))
becfda68 1083 return 0;
81819f0f 1084
81819f0f
CL
1085 if (!check_valid_pointer(s, page, object)) {
1086 object_err(s, page, object, "Freelist Pointer check fails");
becfda68 1087 return 0;
81819f0f
CL
1088 }
1089
f7cb1933 1090 if (!check_object(s, page, object, SLUB_RED_INACTIVE))
becfda68
LA
1091 return 0;
1092
1093 return 1;
1094}
1095
1096static noinline int alloc_debug_processing(struct kmem_cache *s,
1097 struct page *page,
1098 void *object, unsigned long addr)
1099{
1100 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1101 if (!alloc_consistency_checks(s, page, object, addr))
1102 goto bad;
1103 }
81819f0f 1104
3ec09742
CL
 1105 /* Success. Perform special debug activities for allocs */
1106 if (s->flags & SLAB_STORE_USER)
1107 set_track(s, object, TRACK_ALLOC, addr);
1108 trace(s, page, object, 1);
f7cb1933 1109 init_object(s, object, SLUB_RED_ACTIVE);
81819f0f 1110 return 1;
3ec09742 1111
81819f0f
CL
1112bad:
1113 if (PageSlab(page)) {
1114 /*
1115 * If this is a slab page then lets do the best we can
1116 * to avoid issues in the future. Marking all objects
672bba3a 1117 * as used avoids touching the remaining objects.
81819f0f 1118 */
24922684 1119 slab_fix(s, "Marking all objects used");
39b26464 1120 page->inuse = page->objects;
a973e9dd 1121 page->freelist = NULL;
81819f0f
CL
1122 }
1123 return 0;
1124}
1125
becfda68
LA
1126static inline int free_consistency_checks(struct kmem_cache *s,
1127 struct page *page, void *object, unsigned long addr)
81819f0f 1128{
81819f0f 1129 if (!check_valid_pointer(s, page, object)) {
70d71228 1130 slab_err(s, page, "Invalid object pointer 0x%p", object);
becfda68 1131 return 0;
81819f0f
CL
1132 }
1133
1134 if (on_freelist(s, page, object)) {
24922684 1135 object_err(s, page, object, "Object already free");
becfda68 1136 return 0;
81819f0f
CL
1137 }
1138
f7cb1933 1139 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
becfda68 1140 return 0;
81819f0f 1141
1b4f59e3 1142 if (unlikely(s != page->slab_cache)) {
3adbefee 1143 if (!PageSlab(page)) {
756a025f
JP
1144 slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1145 object);
1b4f59e3 1146 } else if (!page->slab_cache) {
f9f58285
FF
1147 pr_err("SLUB <none>: no slab for object 0x%p.\n",
1148 object);
70d71228 1149 dump_stack();
06428780 1150 } else
24922684
CL
1151 object_err(s, page, object,
1152 "page slab pointer corrupt.");
becfda68
LA
1153 return 0;
1154 }
1155 return 1;
1156}
1157
1158/* Supports checking bulk free of a constructed freelist */
1159static noinline int free_debug_processing(
1160 struct kmem_cache *s, struct page *page,
1161 void *head, void *tail, int bulk_cnt,
1162 unsigned long addr)
1163{
1164 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1165 void *object = head;
1166 int cnt = 0;
1167 unsigned long uninitialized_var(flags);
1168 int ret = 0;
1169
1170 spin_lock_irqsave(&n->list_lock, flags);
1171 slab_lock(page);
1172
1173 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1174 if (!check_slab(s, page))
1175 goto out;
1176 }
1177
1178next_object:
1179 cnt++;
1180
1181 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1182 if (!free_consistency_checks(s, page, object, addr))
1183 goto out;
81819f0f 1184 }
3ec09742 1185
3ec09742
CL
1186 if (s->flags & SLAB_STORE_USER)
1187 set_track(s, object, TRACK_FREE, addr);
1188 trace(s, page, object, 0);
81084651 1189 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
f7cb1933 1190 init_object(s, object, SLUB_RED_INACTIVE);
81084651
JDB
1191
1192 /* Reached end of constructed freelist yet? */
1193 if (object != tail) {
1194 object = get_freepointer(s, object);
1195 goto next_object;
1196 }
804aa132
LA
1197 ret = 1;
1198
5c2e4bbb 1199out:
81084651
JDB
1200 if (cnt != bulk_cnt)
1201 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1202 bulk_cnt, cnt);
1203
881db7fb 1204 slab_unlock(page);
282acb43 1205 spin_unlock_irqrestore(&n->list_lock, flags);
804aa132
LA
1206 if (!ret)
1207 slab_fix(s, "Object at 0x%p not freed", object);
1208 return ret;
81819f0f
CL
1209}
1210
41ecc55b
CL
1211static int __init setup_slub_debug(char *str)
1212{
f0630fff
CL
1213 slub_debug = DEBUG_DEFAULT_FLAGS;
1214 if (*str++ != '=' || !*str)
1215 /*
1216 * No options specified. Switch on full debugging.
1217 */
1218 goto out;
1219
1220 if (*str == ',')
1221 /*
1222 * No options but restriction on slabs. This means full
1223 * debugging for slabs matching a pattern.
1224 */
1225 goto check_slabs;
1226
1227 slub_debug = 0;
1228 if (*str == '-')
1229 /*
1230 * Switch off all debugging measures.
1231 */
1232 goto out;
1233
1234 /*
1235 * Determine which debug features should be switched on
1236 */
06428780 1237 for (; *str && *str != ','; str++) {
f0630fff
CL
1238 switch (tolower(*str)) {
1239 case 'f':
becfda68 1240 slub_debug |= SLAB_CONSISTENCY_CHECKS;
f0630fff
CL
1241 break;
1242 case 'z':
1243 slub_debug |= SLAB_RED_ZONE;
1244 break;
1245 case 'p':
1246 slub_debug |= SLAB_POISON;
1247 break;
1248 case 'u':
1249 slub_debug |= SLAB_STORE_USER;
1250 break;
1251 case 't':
1252 slub_debug |= SLAB_TRACE;
1253 break;
4c13dd3b
DM
1254 case 'a':
1255 slub_debug |= SLAB_FAILSLAB;
1256 break;
08303a73
CA
1257 case 'o':
1258 /*
 1259 * Avoid enabling debugging on caches if their minimum
1260 * order would increase as a result.
1261 */
1262 disable_higher_order_debug = 1;
1263 break;
f0630fff 1264 default:
f9f58285
FF
1265 pr_err("slub_debug option '%c' unknown. skipped\n",
1266 *str);
f0630fff 1267 }
41ecc55b
CL
1268 }
1269
f0630fff 1270check_slabs:
41ecc55b
CL
1271 if (*str == ',')
1272 slub_debug_slabs = str + 1;
f0630fff 1273out:
41ecc55b
CL
1274 return 1;
1275}
1276
1277__setup("slub_debug", setup_slub_debug);
1278
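/*
 * Boot command line examples (illustrative):
 *
 *	slub_debug		enable all debug options for all caches
 *	slub_debug=zp		red zoning and poisoning for all caches
 *	slub_debug=u,dentry	user tracking for the dentry cache only
 *	slub_debug=-		switch all debugging off
 *
 * The option letters map to the switch statement in setup_slub_debug()
 * above (f, z, p, u, t, a, o); matching is case insensitive.
 */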
c5fd3ca0
AT
1279/*
1280 * kmem_cache_flags - apply debugging options to the cache
1281 * @object_size: the size of an object without meta data
1282 * @flags: flags to set
1283 * @name: name of the cache
1284 * @ctor: constructor function
1285 *
1286 * Debug option(s) are applied to @flags. In addition to the debug
1287 * option(s), if a slab name (or multiple) is specified i.e.
1288 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1289 * then only the select slabs will receive the debug option(s).
1290 */
0293d1fd 1291slab_flags_t kmem_cache_flags(unsigned int object_size,
d50112ed 1292 slab_flags_t flags, const char *name,
51cc5068 1293 void (*ctor)(void *))
41ecc55b 1294{
c5fd3ca0
AT
1295 char *iter;
1296 size_t len;
1297
1298 /* If slub_debug = 0, it folds into the if conditional. */
1299 if (!slub_debug_slabs)
1300 return flags | slub_debug;
1301
1302 len = strlen(name);
1303 iter = slub_debug_slabs;
1304 while (*iter) {
1305 char *end, *glob;
1306 size_t cmplen;
1307
1308 end = strchr(iter, ',');
1309 if (!end)
1310 end = iter + strlen(iter);
1311
1312 glob = strnchr(iter, end - iter, '*');
1313 if (glob)
1314 cmplen = glob - iter;
1315 else
1316 cmplen = max_t(size_t, len, (end - iter));
1317
1318 if (!strncmp(name, iter, cmplen)) {
1319 flags |= slub_debug;
1320 break;
1321 }
1322
1323 if (!*end)
1324 break;
1325 iter = end + 1;
1326 }
ba0268a8
CL
1327
1328 return flags;
41ecc55b 1329}
b4a64718 1330#else /* !CONFIG_SLUB_DEBUG */
3ec09742
CL
1331static inline void setup_object_debug(struct kmem_cache *s,
1332 struct page *page, void *object) {}
41ecc55b 1333
3ec09742 1334static inline int alloc_debug_processing(struct kmem_cache *s,
ce71e27c 1335 struct page *page, void *object, unsigned long addr) { return 0; }
41ecc55b 1336
282acb43 1337static inline int free_debug_processing(
81084651
JDB
1338 struct kmem_cache *s, struct page *page,
1339 void *head, void *tail, int bulk_cnt,
282acb43 1340 unsigned long addr) { return 0; }
41ecc55b 1341
41ecc55b
CL
1342static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1343 { return 1; }
1344static inline int check_object(struct kmem_cache *s, struct page *page,
f7cb1933 1345 void *object, u8 val) { return 1; }
5cc6eee8
CL
1346static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1347 struct page *page) {}
c65c1877
PZ
1348static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1349 struct page *page) {}
0293d1fd 1350slab_flags_t kmem_cache_flags(unsigned int object_size,
d50112ed 1351 slab_flags_t flags, const char *name,
51cc5068 1352 void (*ctor)(void *))
ba0268a8
CL
1353{
1354 return flags;
1355}
41ecc55b 1356#define slub_debug 0
0f389ec6 1357
fdaa45e9
IM
1358#define disable_higher_order_debug 0
1359
0f389ec6
CL
1360static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1361 { return 0; }
26c02cf0
AB
1362static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1363 { return 0; }
205ab99d
CL
1364static inline void inc_slabs_node(struct kmem_cache *s, int node,
1365 int objects) {}
1366static inline void dec_slabs_node(struct kmem_cache *s, int node,
1367 int objects) {}
7d550c56 1368
02e72cc6
AR
1369#endif /* CONFIG_SLUB_DEBUG */
1370
1371/*
1372 * Hooks for other subsystems that check memory allocations. In a typical
1373 * production configuration these hooks all should produce no code at all.
1374 */
0116523c 1375static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
d56791b3
RB
1376{
1377 kmemleak_alloc(ptr, size, 1, flags);
0116523c 1378 return kasan_kmalloc_large(ptr, size, flags);
d56791b3
RB
1379}
1380
ee3ce779 1381static __always_inline void kfree_hook(void *x)
d56791b3
RB
1382{
1383 kmemleak_free(x);
ee3ce779 1384 kasan_kfree_large(x, _RET_IP_);
d56791b3
RB
1385}
1386
c3895391 1387static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
d56791b3
RB
1388{
1389 kmemleak_free_recursive(x, s->flags);
7d550c56 1390
02e72cc6
AR
1391 /*
1392 * Trouble is that we may no longer disable interrupts in the fast path
1393 * So in order to make the debug calls that expect irqs to be
1394 * disabled we need to disable interrupts temporarily.
1395 */
4675ff05 1396#ifdef CONFIG_LOCKDEP
02e72cc6
AR
1397 {
1398 unsigned long flags;
1399
1400 local_irq_save(flags);
02e72cc6
AR
1401 debug_check_no_locks_freed(x, s->object_size);
1402 local_irq_restore(flags);
1403 }
1404#endif
1405 if (!(s->flags & SLAB_DEBUG_OBJECTS))
1406 debug_check_no_obj_freed(x, s->object_size);
0316bec2 1407
c3895391
AK
1408 /* KASAN might put x into memory quarantine, delaying its reuse */
1409 return kasan_slab_free(s, x, _RET_IP_);
02e72cc6 1410}
205ab99d 1411
c3895391
AK
1412static inline bool slab_free_freelist_hook(struct kmem_cache *s,
1413 void **head, void **tail)
81084651
JDB
1414{
1415/*
1416 * Compiler cannot detect this function can be removed if slab_free_hook()
1417 * evaluates to nothing. Thus, catch all relevant config debug options here.
1418 */
4675ff05 1419#if defined(CONFIG_LOCKDEP) || \
81084651
JDB
1420 defined(CONFIG_DEBUG_KMEMLEAK) || \
1421 defined(CONFIG_DEBUG_OBJECTS_FREE) || \
1422 defined(CONFIG_KASAN)
1423
c3895391
AK
1424 void *object;
1425 void *next = *head;
1426 void *old_tail = *tail ? *tail : *head;
1427
1428 /* Head and tail of the reconstructed freelist */
1429 *head = NULL;
1430 *tail = NULL;
81084651
JDB
1431
1432 do {
c3895391
AK
1433 object = next;
1434 next = get_freepointer(s, object);
1435 /* If object's reuse doesn't have to be delayed */
1436 if (!slab_free_hook(s, object)) {
1437 /* Move object to the new freelist */
1438 set_freepointer(s, object, *head);
1439 *head = object;
1440 if (!*tail)
1441 *tail = object;
1442 }
1443 } while (object != old_tail);
1444
1445 if (*head == *tail)
1446 *tail = NULL;
1447
1448 return *head != NULL;
1449#else
1450 return true;
81084651
JDB
1451#endif
1452}
1453
4d176711 1454static void *setup_object(struct kmem_cache *s, struct page *page,
588f8ba9
TG
1455 void *object)
1456{
1457 setup_object_debug(s, page, object);
4d176711 1458 object = kasan_init_slab_obj(s, object);
588f8ba9
TG
1459 if (unlikely(s->ctor)) {
1460 kasan_unpoison_object_data(s, object);
1461 s->ctor(object);
1462 kasan_poison_object_data(s, object);
1463 }
4d176711 1464 return object;
588f8ba9
TG
1465}
1466
81819f0f
CL
1467/*
1468 * Slab allocation and freeing
1469 */
5dfb4175
VD
1470static inline struct page *alloc_slab_page(struct kmem_cache *s,
1471 gfp_t flags, int node, struct kmem_cache_order_objects oo)
65c3376a 1472{
5dfb4175 1473 struct page *page;
19af27af 1474 unsigned int order = oo_order(oo);
65c3376a 1475
2154a336 1476 if (node == NUMA_NO_NODE)
5dfb4175 1477 page = alloc_pages(flags, order);
65c3376a 1478 else
96db800f 1479 page = __alloc_pages_node(node, flags, order);
5dfb4175 1480
f3ccb2c4
VD
1481 if (page && memcg_charge_slab(page, flags, order, s)) {
1482 __free_pages(page, order);
1483 page = NULL;
1484 }
5dfb4175
VD
1485
1486 return page;
65c3376a
CL
1487}
1488
210e7a43
TG
1489#ifdef CONFIG_SLAB_FREELIST_RANDOM
1490/* Pre-initialize the random sequence cache */
1491static int init_cache_random_seq(struct kmem_cache *s)
1492{
19af27af 1493 unsigned int count = oo_objects(s->oo);
210e7a43 1494 int err;
210e7a43 1495
a810007a
SR
1496 /* Bailout if already initialised */
1497 if (s->random_seq)
1498 return 0;
1499
210e7a43
TG
1500 err = cache_random_seq_create(s, count, GFP_KERNEL);
1501 if (err) {
1502 pr_err("SLUB: Unable to initialize free list for %s\n",
1503 s->name);
1504 return err;
1505 }
1506
1507 /* Transform to an offset on the set of pages */
1508 if (s->random_seq) {
19af27af
AD
1509 unsigned int i;
1510
210e7a43
TG
1511 for (i = 0; i < count; i++)
1512 s->random_seq[i] *= s->size;
1513 }
1514 return 0;
1515}
1516
1517/* Initialize each random sequence freelist per cache */
1518static void __init init_freelist_randomization(void)
1519{
1520 struct kmem_cache *s;
1521
1522 mutex_lock(&slab_mutex);
1523
1524 list_for_each_entry(s, &slab_caches, list)
1525 init_cache_random_seq(s);
1526
1527 mutex_unlock(&slab_mutex);
1528}
1529
1530/* Get the next entry on the pre-computed freelist randomized */
1531static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
1532 unsigned long *pos, void *start,
1533 unsigned long page_limit,
1534 unsigned long freelist_count)
1535{
1536 unsigned int idx;
1537
1538 /*
1539 * If the target page allocation failed, the number of objects on the
1540 * page might be smaller than the usual size defined by the cache.
1541 */
1542 do {
1543 idx = s->random_seq[*pos];
1544 *pos += 1;
1545 if (*pos >= freelist_count)
1546 *pos = 0;
1547 } while (unlikely(idx >= page_limit));
1548
1549 return (char *)start + idx;
1550}
1551
1552/* Shuffle the single linked freelist based on a random pre-computed sequence */
1553static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1554{
1555 void *start;
1556 void *cur;
1557 void *next;
1558 unsigned long idx, pos, page_limit, freelist_count;
1559
1560 if (page->objects < 2 || !s->random_seq)
1561 return false;
1562
1563 freelist_count = oo_objects(s->oo);
1564 pos = get_random_int() % freelist_count;
1565
1566 page_limit = page->objects * s->size;
1567 start = fixup_red_left(s, page_address(page));
1568
1569 /* First entry is used as the base of the freelist */
1570 cur = next_freelist_entry(s, page, &pos, start, page_limit,
1571 freelist_count);
4d176711 1572 cur = setup_object(s, page, cur);
210e7a43
TG
1573 page->freelist = cur;
1574
1575 for (idx = 1; idx < page->objects; idx++) {
210e7a43
TG
1576 next = next_freelist_entry(s, page, &pos, start, page_limit,
1577 freelist_count);
4d176711 1578 next = setup_object(s, page, next);
210e7a43
TG
1579 set_freepointer(s, cur, next);
1580 cur = next;
1581 }
210e7a43
TG
1582 set_freepointer(s, cur, NULL);
1583
1584 return true;
1585}
1586#else
1587static inline int init_cache_random_seq(struct kmem_cache *s)
1588{
1589 return 0;
1590}
1591static inline void init_freelist_randomization(void) { }
1592static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1593{
1594 return false;
1595}
1596#endif /* CONFIG_SLAB_FREELIST_RANDOM */
1597
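/*
 * Illustrative sketch (values made up): with CONFIG_SLAB_FREELIST_RANDOM
 * the pre-computed s->random_seq holds object offsets, e.g. for a
 * 4-object slab of 128-byte objects it might be { 256, 0, 384, 128 }.
 * shuffle_freelist() starts at a random position in that sequence and
 * links the new slab's freelist in that order instead of address order,
 * so the allocation order of a fresh slab is not predictable.
 */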
81819f0f
CL
1598static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1599{
06428780 1600 struct page *page;
834f3d11 1601 struct kmem_cache_order_objects oo = s->oo;
ba52270d 1602 gfp_t alloc_gfp;
4d176711 1603 void *start, *p, *next;
588f8ba9 1604 int idx, order;
210e7a43 1605 bool shuffle;
81819f0f 1606
7e0528da
CL
1607 flags &= gfp_allowed_mask;
1608
d0164adc 1609 if (gfpflags_allow_blocking(flags))
7e0528da
CL
1610 local_irq_enable();
1611
b7a49f0d 1612 flags |= s->allocflags;
e12ba74d 1613
ba52270d
PE
1614 /*
1615 * Let the initial higher-order allocation fail under memory pressure
1616 * so we fall-back to the minimum order allocation.
1617 */
1618 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
d0164adc 1619 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
444eb2a4 1620 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
ba52270d 1621
5dfb4175 1622 page = alloc_slab_page(s, alloc_gfp, node, oo);
65c3376a
CL
1623 if (unlikely(!page)) {
1624 oo = s->min;
80c3a998 1625 alloc_gfp = flags;
65c3376a
CL
1626 /*
1627 * Allocation may have failed due to fragmentation.
1628 * Try a lower order alloc if possible
1629 */
5dfb4175 1630 page = alloc_slab_page(s, alloc_gfp, node, oo);
588f8ba9
TG
1631 if (unlikely(!page))
1632 goto out;
1633 stat(s, ORDER_FALLBACK);
65c3376a 1634 }
5a896d9e 1635
834f3d11 1636 page->objects = oo_objects(oo);
81819f0f 1637
1f458cbf 1638 order = compound_order(page);
1b4f59e3 1639 page->slab_cache = s;
c03f94cc 1640 __SetPageSlab(page);
2f064f34 1641 if (page_is_pfmemalloc(page))
072bb0aa 1642 SetPageSlabPfmemalloc(page);
81819f0f
CL
1643
1644 start = page_address(page);
81819f0f
CL
1645
1646 if (unlikely(s->flags & SLAB_POISON))
1f458cbf 1647 memset(start, POISON_INUSE, PAGE_SIZE << order);
81819f0f 1648
0316bec2
AR
1649 kasan_poison_slab(page);
1650
210e7a43
TG
1651 shuffle = shuffle_freelist(s, page);
1652
1653 if (!shuffle) {
1654 for_each_object_idx(p, idx, s, start, page->objects) {
4d176711
AK
1655 if (likely(idx < page->objects)) {
1656 next = p + s->size;
1657 next = setup_object(s, page, next);
1658 set_freepointer(s, p, next);
1659 } else
210e7a43
TG
1660 set_freepointer(s, p, NULL);
1661 }
4d176711
AK
1662 start = fixup_red_left(s, start);
1663 start = setup_object(s, page, start);
1664 page->freelist = start;
81819f0f 1665 }
81819f0f 1666
e6e82ea1 1667 page->inuse = page->objects;
8cb0a506 1668 page->frozen = 1;
588f8ba9 1669
81819f0f 1670out:
d0164adc 1671 if (gfpflags_allow_blocking(flags))
588f8ba9
TG
1672 local_irq_disable();
1673 if (!page)
1674 return NULL;
1675
7779f212 1676 mod_lruvec_page_state(page,
588f8ba9
TG
1677 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1678 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1679 1 << oo_order(oo));
1680
1681 inc_slabs_node(s, page_to_nid(page), page->objects);
1682
81819f0f
CL
1683 return page;
1684}
1685
588f8ba9
TG
1686static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1687{
1688 if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
bacdcb34 1689 gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
72baeef0
MH
1690 flags &= ~GFP_SLAB_BUG_MASK;
1691 pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
1692 invalid_mask, &invalid_mask, flags, &flags);
65b9de75 1693 dump_stack();
588f8ba9
TG
1694 }
1695
1696 return allocate_slab(s,
1697 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1698}
1699
81819f0f
CL
1700static void __free_slab(struct kmem_cache *s, struct page *page)
1701{
834f3d11
CL
1702 int order = compound_order(page);
1703 int pages = 1 << order;
81819f0f 1704
becfda68 1705 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
81819f0f
CL
1706 void *p;
1707
1708 slab_pad_check(s, page);
224a88be
CL
1709 for_each_object(p, s, page_address(page),
1710 page->objects)
f7cb1933 1711 check_object(s, page, p, SLUB_RED_INACTIVE);
81819f0f
CL
1712 }
1713
7779f212 1714 mod_lruvec_page_state(page,
81819f0f
CL
1715 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1716 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
06428780 1717 -pages);
81819f0f 1718
072bb0aa 1719 __ClearPageSlabPfmemalloc(page);
49bd5221 1720 __ClearPageSlab(page);
1f458cbf 1721
d4fc5069 1722 page->mapping = NULL;
1eb5ac64
NP
1723 if (current->reclaim_state)
1724 current->reclaim_state->reclaimed_slab += pages;
27ee57c9
VD
1725 memcg_uncharge_slab(page, order, s);
1726 __free_pages(page, order);
81819f0f
CL
1727}
1728
1729static void rcu_free_slab(struct rcu_head *h)
1730{
bf68c214 1731 struct page *page = container_of(h, struct page, rcu_head);
da9a638c 1732
1b4f59e3 1733 __free_slab(page->slab_cache, page);
81819f0f
CL
1734}
1735
1736static void free_slab(struct kmem_cache *s, struct page *page)
1737{
5f0d5a3a 1738 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
bf68c214 1739 call_rcu(&page->rcu_head, rcu_free_slab);
81819f0f
CL
1740 } else
1741 __free_slab(s, page);
1742}
1743
1744static void discard_slab(struct kmem_cache *s, struct page *page)
1745{
205ab99d 1746 dec_slabs_node(s, page_to_nid(page), page->objects);
81819f0f
CL
1747 free_slab(s, page);
1748}
1749
1750/*
5cc6eee8 1751 * Management of partially allocated slabs.
81819f0f 1752 */
1e4dd946
SR
1753static inline void
1754__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
81819f0f 1755{
e95eed57 1756 n->nr_partial++;
136333d1 1757 if (tail == DEACTIVATE_TO_TAIL)
7c2e132c
CL
1758 list_add_tail(&page->lru, &n->partial);
1759 else
1760 list_add(&page->lru, &n->partial);
81819f0f
CL
1761}
1762
1e4dd946
SR
1763static inline void add_partial(struct kmem_cache_node *n,
1764 struct page *page, int tail)
62e346a8 1765{
c65c1877 1766 lockdep_assert_held(&n->list_lock);
1e4dd946
SR
1767 __add_partial(n, page, tail);
1768}
c65c1877 1769
1e4dd946
SR
1770static inline void remove_partial(struct kmem_cache_node *n,
1771 struct page *page)
1772{
1773 lockdep_assert_held(&n->list_lock);
52b4b950
DS
1774 list_del(&page->lru);
1775 n->nr_partial--;
1e4dd946
SR
1776}
1777
81819f0f 1778/*
7ced3719
CL
1779 * Remove slab from the partial list, freeze it and
1780 * return the pointer to the freelist.
81819f0f 1781 *
497b66f2 1782 * Returns a list of objects or NULL if it fails.
81819f0f 1783 */
497b66f2 1784static inline void *acquire_slab(struct kmem_cache *s,
acd19fd1 1785 struct kmem_cache_node *n, struct page *page,
633b0764 1786 int mode, int *objects)
81819f0f 1787{
2cfb7455
CL
1788 void *freelist;
1789 unsigned long counters;
1790 struct page new;
1791
c65c1877
PZ
1792 lockdep_assert_held(&n->list_lock);
1793
2cfb7455
CL
1794 /*
1795 * Zap the freelist and set the frozen bit.
1796 * The old freelist is the list of objects for the
1797 * per cpu allocation list.
1798 */
7ced3719
CL
1799 freelist = page->freelist;
1800 counters = page->counters;
1801 new.counters = counters;
633b0764 1802 *objects = new.objects - new.inuse;
23910c50 1803 if (mode) {
7ced3719 1804 new.inuse = page->objects;
23910c50
PE
1805 new.freelist = NULL;
1806 } else {
1807 new.freelist = freelist;
1808 }
2cfb7455 1809
a0132ac0 1810 VM_BUG_ON(new.frozen);
7ced3719 1811 new.frozen = 1;
2cfb7455 1812
7ced3719 1813 if (!__cmpxchg_double_slab(s, page,
2cfb7455 1814 freelist, counters,
02d7633f 1815 new.freelist, new.counters,
7ced3719 1816 "acquire_slab"))
7ced3719 1817 return NULL;
2cfb7455
CL
1818
1819 remove_partial(n, page);
7ced3719 1820 WARN_ON(!freelist);
49e22585 1821 return freelist;
81819f0f
CL
1822}
1823
633b0764 1824static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
8ba00bb6 1825static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
49e22585 1826
81819f0f 1827/*
672bba3a 1828 * Try to allocate a partial slab from a specific node.
81819f0f 1829 */
8ba00bb6
JK
1830static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1831 struct kmem_cache_cpu *c, gfp_t flags)
81819f0f 1832{
49e22585
CL
1833 struct page *page, *page2;
1834 void *object = NULL;
e5d9998f 1835 unsigned int available = 0;
633b0764 1836 int objects;
81819f0f
CL
1837
1838 /*
1839 * Racy check. If we mistakenly see no partial slabs then we
1840 * just allocate an empty slab. If we mistakenly try to get a
672bba3a
CL
1841 * partial slab and there is none available then get_partial_node()
1842 * will return NULL.
81819f0f
CL
1843 */
1844 if (!n || !n->nr_partial)
1845 return NULL;
1846
1847 spin_lock(&n->list_lock);
49e22585 1848 list_for_each_entry_safe(page, page2, &n->partial, lru) {
8ba00bb6 1849 void *t;
49e22585 1850
8ba00bb6
JK
1851 if (!pfmemalloc_match(page, flags))
1852 continue;
1853
633b0764 1854 t = acquire_slab(s, n, page, object == NULL, &objects);
49e22585
CL
1855 if (!t)
1856 break;
1857
633b0764 1858 available += objects;
12d79634 1859 if (!object) {
49e22585 1860 c->page = page;
49e22585 1861 stat(s, ALLOC_FROM_PARTIAL);
49e22585 1862 object = t;
49e22585 1863 } else {
633b0764 1864 put_cpu_partial(s, page, 0);
8028dcea 1865 stat(s, CPU_PARTIAL_NODE);
49e22585 1866 }
345c905d 1867 if (!kmem_cache_has_cpu_partial(s)
e6d0e1dc 1868 || available > slub_cpu_partial(s) / 2)
49e22585
CL
1869 break;
1870
497b66f2 1871 }
81819f0f 1872 spin_unlock(&n->list_lock);
497b66f2 1873 return object;
81819f0f
CL
1874}
1875
1876/*
672bba3a 1877 * Get a page from somewhere. Search in increasing NUMA distances.
81819f0f 1878 */
de3ec035 1879static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
acd19fd1 1880 struct kmem_cache_cpu *c)
81819f0f
CL
1881{
1882#ifdef CONFIG_NUMA
1883 struct zonelist *zonelist;
dd1a239f 1884 struct zoneref *z;
54a6eb5c
MG
1885 struct zone *zone;
1886 enum zone_type high_zoneidx = gfp_zone(flags);
497b66f2 1887 void *object;
cc9a6c87 1888 unsigned int cpuset_mems_cookie;
81819f0f
CL
1889
1890 /*
672bba3a
CL
1891 * The defrag ratio allows a configuration of the tradeoffs between
1892 * inter node defragmentation and node local allocations. A lower
1893 * defrag_ratio increases the tendency to do local allocations
1894 * instead of attempting to obtain partial slabs from other nodes.
81819f0f 1895 *
672bba3a
CL
1896 * If the defrag_ratio is set to 0 then kmalloc() always
1897 * returns node local objects. If the ratio is higher then kmalloc()
1898 * may return off node objects because partial slabs are obtained
1899 * from other nodes and filled up.
81819f0f 1900 *
43efd3ea
LP
1901 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
1902 * (which makes defrag_ratio = 1000) then every (well almost)
1903 * allocation will first attempt to defrag slab caches on other nodes.
1904 * This means scanning over all nodes to look for partial slabs which
1905 * may be expensive if we do it every time we are trying to find a slab
672bba3a 1906 * with available objects.
81819f0f 1907 */
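	/*
	 * Illustrative example (not part of the original source): the check
	 * below compares a pseudo-random value in [0, 1023], derived from
	 * get_cycles(), against the stored ratio.  As the comment above
	 * notes, the sysfs value is stored scaled by ten, so writing 30 to
	 * remote_node_defrag_ratio stores 300 and roughly 300/1024 (about
	 * 30%) of the calls fall through to the remote search below, while
	 * the rest return NULL immediately.
	 */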
9824601e
CL
1908 if (!s->remote_node_defrag_ratio ||
1909 get_cycles() % 1024 > s->remote_node_defrag_ratio)
81819f0f
CL
1910 return NULL;
1911
cc9a6c87 1912 do {
d26914d1 1913 cpuset_mems_cookie = read_mems_allowed_begin();
2a389610 1914 zonelist = node_zonelist(mempolicy_slab_node(), flags);
cc9a6c87
MG
1915 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1916 struct kmem_cache_node *n;
1917
1918 n = get_node(s, zone_to_nid(zone));
1919
dee2f8aa 1920 if (n && cpuset_zone_allowed(zone, flags) &&
cc9a6c87 1921 n->nr_partial > s->min_partial) {
8ba00bb6 1922 object = get_partial_node(s, n, c, flags);
cc9a6c87
MG
1923 if (object) {
1924 /*
d26914d1
MG
1925 * Don't check read_mems_allowed_retry()
1926 * here - if mems_allowed was updated in
1927 * parallel, that was a harmless race
1928 * between allocation and the cpuset
1929 * update
cc9a6c87 1930 */
cc9a6c87
MG
1931 return object;
1932 }
c0ff7453 1933 }
81819f0f 1934 }
d26914d1 1935 } while (read_mems_allowed_retry(cpuset_mems_cookie));
81819f0f
CL
1936#endif
1937 return NULL;
1938}
1939
1940/*
1941 * Get a partial page, lock it and return it.
1942 */
497b66f2 1943static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
acd19fd1 1944 struct kmem_cache_cpu *c)
81819f0f 1945{
497b66f2 1946 void *object;
a561ce00
JK
1947 int searchnode = node;
1948
1949 if (node == NUMA_NO_NODE)
1950 searchnode = numa_mem_id();
1951 else if (!node_present_pages(node))
1952 searchnode = node_to_mem_node(node);
81819f0f 1953
8ba00bb6 1954 object = get_partial_node(s, get_node(s, searchnode), c, flags);
497b66f2
CL
1955 if (object || node != NUMA_NO_NODE)
1956 return object;
81819f0f 1957
acd19fd1 1958 return get_any_partial(s, flags, c);
81819f0f
CL
1959}
1960
8a5ec0ba
CL
1961#ifdef CONFIG_PREEMPT
1962/*
1963 * Calculate the next globally unique transaction for disambiguation
1964 * during cmpxchg. The transactions start with the cpu number and are then
1965 * incremented by CONFIG_NR_CPUS.
1966 */
1967#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
1968#else
1969/*
1970 * No preemption supported therefore also no need to check for
1971 * different cpus.
1972 */
1973#define TID_STEP 1
1974#endif
1975
1976static inline unsigned long next_tid(unsigned long tid)
1977{
1978 return tid + TID_STEP;
1979}
1980
1981static inline unsigned int tid_to_cpu(unsigned long tid)
1982{
1983 return tid % TID_STEP;
1984}
1985
1986static inline unsigned long tid_to_event(unsigned long tid)
1987{
1988 return tid / TID_STEP;
1989}
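/*
 * Worked example (illustrative only): with CONFIG_PREEMPT and
 * CONFIG_NR_CPUS = 6, TID_STEP is rounded up to 8.  init_tid(2) == 2
 * and successive next_tid() calls on cpu 2 yield 2, 10, 18, ...
 * For any of these values tid_to_cpu() recovers 2 and tid_to_event()
 * counts 0, 1, 2, ... so a tid both identifies the cpu it was taken
 * on and orders the operations performed there.
 */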
1990
1991static inline unsigned int init_tid(int cpu)
1992{
1993 return cpu;
1994}
1995
1996static inline void note_cmpxchg_failure(const char *n,
1997 const struct kmem_cache *s, unsigned long tid)
1998{
1999#ifdef SLUB_DEBUG_CMPXCHG
2000 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
2001
f9f58285 2002 pr_info("%s %s: cmpxchg redo ", n, s->name);
8a5ec0ba
CL
2003
2004#ifdef CONFIG_PREEMPT
2005 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
f9f58285 2006 pr_warn("due to cpu change %d -> %d\n",
8a5ec0ba
CL
2007 tid_to_cpu(tid), tid_to_cpu(actual_tid));
2008 else
2009#endif
2010 if (tid_to_event(tid) != tid_to_event(actual_tid))
f9f58285 2011 pr_warn("due to cpu running other code. Event %ld->%ld\n",
8a5ec0ba
CL
2012 tid_to_event(tid), tid_to_event(actual_tid));
2013 else
f9f58285 2014 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
8a5ec0ba
CL
2015 actual_tid, tid, next_tid(tid));
2016#endif
4fdccdfb 2017 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
8a5ec0ba
CL
2018}
2019
788e1aad 2020static void init_kmem_cache_cpus(struct kmem_cache *s)
8a5ec0ba 2021{
8a5ec0ba
CL
2022 int cpu;
2023
2024 for_each_possible_cpu(cpu)
2025 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
8a5ec0ba 2026}
2cfb7455 2027
81819f0f
CL
2028/*
2029 * Remove the cpu slab
2030 */
d0e0ac97 2031static void deactivate_slab(struct kmem_cache *s, struct page *page,
d4ff6d35 2032 void *freelist, struct kmem_cache_cpu *c)
81819f0f 2033{
2cfb7455 2034 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2cfb7455
CL
2035 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
2036 int lock = 0;
2037 enum slab_modes l = M_NONE, m = M_NONE;
2cfb7455 2038 void *nextfree;
136333d1 2039 int tail = DEACTIVATE_TO_HEAD;
2cfb7455
CL
2040 struct page new;
2041 struct page old;
2042
2043 if (page->freelist) {
84e554e6 2044 stat(s, DEACTIVATE_REMOTE_FREES);
136333d1 2045 tail = DEACTIVATE_TO_TAIL;
2cfb7455
CL
2046 }
2047
894b8788 2048 /*
2cfb7455
CL
2049 * Stage one: Free all available per cpu objects back
2050 * to the page freelist while it is still frozen. Leave the
2051 * last one.
2052 *
2053 * There is no need to take the list->lock because the page
2054 * is still frozen.
2055 */
2056 while (freelist && (nextfree = get_freepointer(s, freelist))) {
2057 void *prior;
2058 unsigned long counters;
2059
2060 do {
2061 prior = page->freelist;
2062 counters = page->counters;
2063 set_freepointer(s, freelist, prior);
2064 new.counters = counters;
2065 new.inuse--;
a0132ac0 2066 VM_BUG_ON(!new.frozen);
2cfb7455 2067
1d07171c 2068 } while (!__cmpxchg_double_slab(s, page,
2cfb7455
CL
2069 prior, counters,
2070 freelist, new.counters,
2071 "drain percpu freelist"));
2072
2073 freelist = nextfree;
2074 }
2075
894b8788 2076 /*
2cfb7455
CL
2077 * Stage two: Ensure that the page is unfrozen while the
2078 * list presence reflects the actual number of objects
2079 * during unfreeze.
2080 *
2081 * We setup the list membership and then perform a cmpxchg
2082 * with the count. If there is a mismatch then the page
2083 * is not unfrozen but the page is on the wrong list.
2084 *
2085 * Then we restart the process which may have to remove
2086 * the page from the list that we just put it on again
2087 * because the number of objects in the slab may have
2088 * changed.
894b8788 2089 */
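	/*
	 * Summary of the target states chosen below (illustrative note,
	 * derived from the code that follows): a slab with no objects in
	 * use is freed (M_FREE) once the node already holds at least
	 * s->min_partial partial slabs; a slab that still has free objects
	 * goes to the partial list (M_PARTIAL); a fully allocated slab is
	 * marked M_FULL (the full list itself is only maintained for
	 * debug caches).
	 */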
2cfb7455 2090redo:
894b8788 2091
2cfb7455
CL
2092 old.freelist = page->freelist;
2093 old.counters = page->counters;
a0132ac0 2094 VM_BUG_ON(!old.frozen);
7c2e132c 2095
2cfb7455
CL
2096 /* Determine target state of the slab */
2097 new.counters = old.counters;
2098 if (freelist) {
2099 new.inuse--;
2100 set_freepointer(s, freelist, old.freelist);
2101 new.freelist = freelist;
2102 } else
2103 new.freelist = old.freelist;
2104
2105 new.frozen = 0;
2106
8a5b20ae 2107 if (!new.inuse && n->nr_partial >= s->min_partial)
2cfb7455
CL
2108 m = M_FREE;
2109 else if (new.freelist) {
2110 m = M_PARTIAL;
2111 if (!lock) {
2112 lock = 1;
2113 /*
2114 * Taking the spinlock removes the possibility
2115 * that acquire_slab() will see a slab page that
2116 * is frozen
2117 */
2118 spin_lock(&n->list_lock);
2119 }
2120 } else {
2121 m = M_FULL;
2122 if (kmem_cache_debug(s) && !lock) {
2123 lock = 1;
2124 /*
2125 * This also ensures that the scanning of full
2126 * slabs from diagnostic functions will not see
2127 * any frozen slabs.
2128 */
2129 spin_lock(&n->list_lock);
2130 }
2131 }
2132
2133 if (l != m) {
2cfb7455 2134 if (l == M_PARTIAL)
2cfb7455 2135 remove_partial(n, page);
2cfb7455 2136 else if (l == M_FULL)
c65c1877 2137 remove_full(s, n, page);
2cfb7455 2138
88349a28 2139 if (m == M_PARTIAL)
2cfb7455 2140 add_partial(n, page, tail);
88349a28 2141 else if (m == M_FULL)
2cfb7455 2142 add_full(s, n, page);
2cfb7455
CL
2143 }
2144
2145 l = m;
1d07171c 2146 if (!__cmpxchg_double_slab(s, page,
2cfb7455
CL
2147 old.freelist, old.counters,
2148 new.freelist, new.counters,
2149 "unfreezing slab"))
2150 goto redo;
2151
2cfb7455
CL
2152 if (lock)
2153 spin_unlock(&n->list_lock);
2154
88349a28
WY
2155 if (m == M_PARTIAL)
2156 stat(s, tail);
2157 else if (m == M_FULL)
2158 stat(s, DEACTIVATE_FULL);
2159 else if (m == M_FREE) {
2cfb7455
CL
2160 stat(s, DEACTIVATE_EMPTY);
2161 discard_slab(s, page);
2162 stat(s, FREE_SLAB);
894b8788 2163 }
d4ff6d35
WY
2164
2165 c->page = NULL;
2166 c->freelist = NULL;
81819f0f
CL
2167}
2168
d24ac77f
JK
2169/*
2170 * Unfreeze all the cpu partial slabs.
2171 *
59a09917
CL
2172 * This function must be called with interrupts disabled
2173 * for the cpu using c (or some other means of ensuring that
2174 * there are no concurrent accesses).
d24ac77f 2175 */
59a09917
CL
2176static void unfreeze_partials(struct kmem_cache *s,
2177 struct kmem_cache_cpu *c)
49e22585 2178{
345c905d 2179#ifdef CONFIG_SLUB_CPU_PARTIAL
43d77867 2180 struct kmem_cache_node *n = NULL, *n2 = NULL;
9ada1934 2181 struct page *page, *discard_page = NULL;
49e22585
CL
2182
2183 while ((page = c->partial)) {
49e22585
CL
2184 struct page new;
2185 struct page old;
2186
2187 c->partial = page->next;
43d77867
JK
2188
2189 n2 = get_node(s, page_to_nid(page));
2190 if (n != n2) {
2191 if (n)
2192 spin_unlock(&n->list_lock);
2193
2194 n = n2;
2195 spin_lock(&n->list_lock);
2196 }
49e22585
CL
2197
2198 do {
2199
2200 old.freelist = page->freelist;
2201 old.counters = page->counters;
a0132ac0 2202 VM_BUG_ON(!old.frozen);
49e22585
CL
2203
2204 new.counters = old.counters;
2205 new.freelist = old.freelist;
2206
2207 new.frozen = 0;
2208
d24ac77f 2209 } while (!__cmpxchg_double_slab(s, page,
49e22585
CL
2210 old.freelist, old.counters,
2211 new.freelist, new.counters,
2212 "unfreezing slab"));
2213
8a5b20ae 2214 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
9ada1934
SL
2215 page->next = discard_page;
2216 discard_page = page;
43d77867
JK
2217 } else {
2218 add_partial(n, page, DEACTIVATE_TO_TAIL);
2219 stat(s, FREE_ADD_PARTIAL);
49e22585
CL
2220 }
2221 }
2222
2223 if (n)
2224 spin_unlock(&n->list_lock);
9ada1934
SL
2225
2226 while (discard_page) {
2227 page = discard_page;
2228 discard_page = discard_page->next;
2229
2230 stat(s, DEACTIVATE_EMPTY);
2231 discard_slab(s, page);
2232 stat(s, FREE_SLAB);
2233 }
345c905d 2234#endif
49e22585
CL
2235}
2236
2237/*
2238 * Put a page that was just frozen (in __slab_free) into a partial page
0d2d5d40 2239 * slot if available.
49e22585
CL
2240 *
2241 * If we did not find a slot then simply move all the partials to the
2242 * per node partial list.
2243 */
633b0764 2244static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
49e22585 2245{
345c905d 2246#ifdef CONFIG_SLUB_CPU_PARTIAL
49e22585
CL
2247 struct page *oldpage;
2248 int pages;
2249 int pobjects;
2250
d6e0b7fa 2251 preempt_disable();
49e22585
CL
2252 do {
2253 pages = 0;
2254 pobjects = 0;
2255 oldpage = this_cpu_read(s->cpu_slab->partial);
2256
2257 if (oldpage) {
2258 pobjects = oldpage->pobjects;
2259 pages = oldpage->pages;
2260 if (drain && pobjects > s->cpu_partial) {
2261 unsigned long flags;
2262 /*
2263 * partial array is full. Move the existing
2264 * set to the per node partial list.
2265 */
2266 local_irq_save(flags);
59a09917 2267 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
49e22585 2268 local_irq_restore(flags);
e24fc410 2269 oldpage = NULL;
49e22585
CL
2270 pobjects = 0;
2271 pages = 0;
8028dcea 2272 stat(s, CPU_PARTIAL_DRAIN);
49e22585
CL
2273 }
2274 }
2275
2276 pages++;
2277 pobjects += page->objects - page->inuse;
2278
2279 page->pages = pages;
2280 page->pobjects = pobjects;
2281 page->next = oldpage;
2282
d0e0ac97
CG
2283 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2284 != oldpage);
d6e0b7fa
VD
2285 if (unlikely(!s->cpu_partial)) {
2286 unsigned long flags;
2287
2288 local_irq_save(flags);
2289 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2290 local_irq_restore(flags);
2291 }
2292 preempt_enable();
345c905d 2293#endif
49e22585
CL
2294}
2295
dfb4f096 2296static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
81819f0f 2297{
84e554e6 2298 stat(s, CPUSLAB_FLUSH);
d4ff6d35 2299 deactivate_slab(s, c->page, c->freelist, c);
c17dda40
CL
2300
2301 c->tid = next_tid(c->tid);
81819f0f
CL
2302}
2303
2304/*
2305 * Flush cpu slab.
6446faa2 2306 *
81819f0f
CL
2307 * Called from IPI handler with interrupts disabled.
2308 */
0c710013 2309static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
81819f0f 2310{
9dfc6e68 2311 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
81819f0f 2312
1265ef2d
WY
2313 if (c->page)
2314 flush_slab(s, c);
49e22585 2315
1265ef2d 2316 unfreeze_partials(s, c);
81819f0f
CL
2317}
2318
2319static void flush_cpu_slab(void *d)
2320{
2321 struct kmem_cache *s = d;
81819f0f 2322
dfb4f096 2323 __flush_cpu_slab(s, smp_processor_id());
81819f0f
CL
2324}
2325
a8364d55
GBY
2326static bool has_cpu_slab(int cpu, void *info)
2327{
2328 struct kmem_cache *s = info;
2329 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2330
a93cf07b 2331 return c->page || slub_percpu_partial(c);
a8364d55
GBY
2332}
2333
81819f0f
CL
2334static void flush_all(struct kmem_cache *s)
2335{
a8364d55 2336 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
81819f0f
CL
2337}
2338
a96a87bf
SAS
2339/*
2340 * Use the cpu notifier to ensure that the cpu slabs are flushed when
2341 * necessary.
2342 */
2343static int slub_cpu_dead(unsigned int cpu)
2344{
2345 struct kmem_cache *s;
2346 unsigned long flags;
2347
2348 mutex_lock(&slab_mutex);
2349 list_for_each_entry(s, &slab_caches, list) {
2350 local_irq_save(flags);
2351 __flush_cpu_slab(s, cpu);
2352 local_irq_restore(flags);
2353 }
2354 mutex_unlock(&slab_mutex);
2355 return 0;
2356}
2357
dfb4f096
CL
2358/*
2359 * Check if the objects in a per cpu structure fit numa
2360 * locality expectations.
2361 */
57d437d2 2362static inline int node_match(struct page *page, int node)
dfb4f096
CL
2363{
2364#ifdef CONFIG_NUMA
6159d0f5 2365 if (node != NUMA_NO_NODE && page_to_nid(page) != node)
dfb4f096
CL
2366 return 0;
2367#endif
2368 return 1;
2369}
2370
9a02d699 2371#ifdef CONFIG_SLUB_DEBUG
781b2ba6
PE
2372static int count_free(struct page *page)
2373{
2374 return page->objects - page->inuse;
2375}
2376
9a02d699
DR
2377static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2378{
2379 return atomic_long_read(&n->total_objects);
2380}
2381#endif /* CONFIG_SLUB_DEBUG */
2382
2383#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
781b2ba6
PE
2384static unsigned long count_partial(struct kmem_cache_node *n,
2385 int (*get_count)(struct page *))
2386{
2387 unsigned long flags;
2388 unsigned long x = 0;
2389 struct page *page;
2390
2391 spin_lock_irqsave(&n->list_lock, flags);
2392 list_for_each_entry(page, &n->partial, lru)
2393 x += get_count(page);
2394 spin_unlock_irqrestore(&n->list_lock, flags);
2395 return x;
2396}
9a02d699 2397#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
26c02cf0 2398
781b2ba6
PE
2399static noinline void
2400slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2401{
9a02d699
DR
2402#ifdef CONFIG_SLUB_DEBUG
2403 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2404 DEFAULT_RATELIMIT_BURST);
781b2ba6 2405 int node;
fa45dc25 2406 struct kmem_cache_node *n;
781b2ba6 2407
9a02d699
DR
2408 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2409 return;
2410
5b3810e5
VB
2411 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2412 nid, gfpflags, &gfpflags);
19af27af 2413 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
f9f58285
FF
2414 s->name, s->object_size, s->size, oo_order(s->oo),
2415 oo_order(s->min));
781b2ba6 2416
3b0efdfa 2417 if (oo_order(s->min) > get_order(s->object_size))
f9f58285
FF
2418 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n",
2419 s->name);
fa5ec8a1 2420
fa45dc25 2421 for_each_kmem_cache_node(s, node, n) {
781b2ba6
PE
2422 unsigned long nr_slabs;
2423 unsigned long nr_objs;
2424 unsigned long nr_free;
2425
26c02cf0
AB
2426 nr_free = count_partial(n, count_free);
2427 nr_slabs = node_nr_slabs(n);
2428 nr_objs = node_nr_objs(n);
781b2ba6 2429
f9f58285 2430 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
781b2ba6
PE
2431 node, nr_slabs, nr_objs, nr_free);
2432 }
9a02d699 2433#endif
781b2ba6
PE
2434}
2435
497b66f2
CL
2436static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2437 int node, struct kmem_cache_cpu **pc)
2438{
6faa6833 2439 void *freelist;
188fd063
CL
2440 struct kmem_cache_cpu *c = *pc;
2441 struct page *page;
497b66f2 2442
128227e7
MW
2443 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
2444
188fd063 2445 freelist = get_partial(s, flags, node, c);
497b66f2 2446
188fd063
CL
2447 if (freelist)
2448 return freelist;
2449
2450 page = new_slab(s, flags, node);
497b66f2 2451 if (page) {
7c8e0181 2452 c = raw_cpu_ptr(s->cpu_slab);
497b66f2
CL
2453 if (c->page)
2454 flush_slab(s, c);
2455
2456 /*
2457 * No other reference to the page yet so we can
2458 * muck around with it freely without cmpxchg
2459 */
6faa6833 2460 freelist = page->freelist;
497b66f2
CL
2461 page->freelist = NULL;
2462
2463 stat(s, ALLOC_SLAB);
497b66f2
CL
2464 c->page = page;
2465 *pc = c;
2466 } else
6faa6833 2467 freelist = NULL;
497b66f2 2468
6faa6833 2469 return freelist;
497b66f2
CL
2470}
2471
072bb0aa
MG
2472static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2473{
2474 if (unlikely(PageSlabPfmemalloc(page)))
2475 return gfp_pfmemalloc_allowed(gfpflags);
2476
2477 return true;
2478}
2479
213eeb9f 2480/*
d0e0ac97
CG
2481 * Check the page->freelist of a page and either transfer the freelist to the
2482 * per cpu freelist or deactivate the page.
213eeb9f
CL
2483 *
2484 * The page is still frozen if the return value is not NULL.
2485 *
2486 * If this function returns NULL then the page has been unfrozen.
d24ac77f
JK
2487 *
2488 * This function must be called with interrupt disabled.
213eeb9f
CL
2489 */
2490static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2491{
2492 struct page new;
2493 unsigned long counters;
2494 void *freelist;
2495
2496 do {
2497 freelist = page->freelist;
2498 counters = page->counters;
6faa6833 2499
213eeb9f 2500 new.counters = counters;
a0132ac0 2501 VM_BUG_ON(!new.frozen);
213eeb9f
CL
2502
2503 new.inuse = page->objects;
2504 new.frozen = freelist != NULL;
2505
d24ac77f 2506 } while (!__cmpxchg_double_slab(s, page,
213eeb9f
CL
2507 freelist, counters,
2508 NULL, new.counters,
2509 "get_freelist"));
2510
2511 return freelist;
2512}
2513
81819f0f 2514/*
894b8788
CL
2515 * Slow path. The lockless freelist is empty or we need to perform
2516 * debugging duties.
2517 *
894b8788
CL
2518 * Processing is still very fast if new objects have been freed to the
2519 * regular freelist. In that case we simply take over the regular freelist
2520 * as the lockless freelist and zap the regular freelist.
81819f0f 2521 *
894b8788
CL
2522 * If that is not working then we fall back to the partial lists. We take the
2523 * first element of the freelist as the object to allocate now and move the
2524 * rest of the freelist to the lockless freelist.
81819f0f 2525 *
894b8788 2526 * And if we were unable to get a new slab from the partial slab lists then
6446faa2
CL
2527 * we need to allocate a new slab. This is the slowest path since it involves
2528 * a call to the page allocator and the setup of a new slab.
a380a3c7
CL
2529 *
2530 * Version of __slab_alloc to use when we know that interrupts are
2531 * already disabled (which is the case for bulk allocation).
81819f0f 2532 */
a380a3c7 2533static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
ce71e27c 2534 unsigned long addr, struct kmem_cache_cpu *c)
81819f0f 2535{
6faa6833 2536 void *freelist;
f6e7def7 2537 struct page *page;
81819f0f 2538
f6e7def7
CL
2539 page = c->page;
2540 if (!page)
81819f0f 2541 goto new_slab;
49e22585 2542redo:
6faa6833 2543
57d437d2 2544 if (unlikely(!node_match(page, node))) {
a561ce00
JK
2545 int searchnode = node;
2546
2547 if (node != NUMA_NO_NODE && !node_present_pages(node))
2548 searchnode = node_to_mem_node(node);
2549
2550 if (unlikely(!node_match(page, searchnode))) {
2551 stat(s, ALLOC_NODE_MISMATCH);
d4ff6d35 2552 deactivate_slab(s, page, c->freelist, c);
a561ce00
JK
2553 goto new_slab;
2554 }
fc59c053 2555 }
6446faa2 2556
072bb0aa
MG
2557 /*
2558 * By rights, we should be searching for a slab page that was
2559 * PFMEMALLOC but right now, we are losing the pfmemalloc
2560 * information when the page leaves the per-cpu allocator
2561 */
2562 if (unlikely(!pfmemalloc_match(page, gfpflags))) {
d4ff6d35 2563 deactivate_slab(s, page, c->freelist, c);
072bb0aa
MG
2564 goto new_slab;
2565 }
2566
73736e03 2567 /* must check again c->freelist in case of cpu migration or IRQ */
6faa6833
CL
2568 freelist = c->freelist;
2569 if (freelist)
73736e03 2570 goto load_freelist;
03e404af 2571
f6e7def7 2572 freelist = get_freelist(s, page);
6446faa2 2573
6faa6833 2574 if (!freelist) {
03e404af
CL
2575 c->page = NULL;
2576 stat(s, DEACTIVATE_BYPASS);
fc59c053 2577 goto new_slab;
03e404af 2578 }
6446faa2 2579
84e554e6 2580 stat(s, ALLOC_REFILL);
6446faa2 2581
894b8788 2582load_freelist:
507effea
CL
2583 /*
2584 * freelist is pointing to the list of objects to be used.
2585 * page is pointing to the page from which the objects are obtained.
2586 * That page must be frozen for per cpu allocations to work.
2587 */
a0132ac0 2588 VM_BUG_ON(!c->page->frozen);
6faa6833 2589 c->freelist = get_freepointer(s, freelist);
8a5ec0ba 2590 c->tid = next_tid(c->tid);
6faa6833 2591 return freelist;
81819f0f 2592
81819f0f 2593new_slab:
2cfb7455 2594
a93cf07b
WY
2595 if (slub_percpu_partial(c)) {
2596 page = c->page = slub_percpu_partial(c);
2597 slub_set_percpu_partial(c, page);
49e22585 2598 stat(s, CPU_PARTIAL_ALLOC);
49e22585 2599 goto redo;
81819f0f
CL
2600 }
2601
188fd063 2602 freelist = new_slab_objects(s, gfpflags, node, &c);
01ad8a7b 2603
f4697436 2604 if (unlikely(!freelist)) {
9a02d699 2605 slab_out_of_memory(s, gfpflags, node);
f4697436 2606 return NULL;
81819f0f 2607 }
2cfb7455 2608
f6e7def7 2609 page = c->page;
5091b74a 2610 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
4b6f0750 2611 goto load_freelist;
2cfb7455 2612
497b66f2 2613 /* Only entered in the debug case */
d0e0ac97
CG
2614 if (kmem_cache_debug(s) &&
2615 !alloc_debug_processing(s, page, freelist, addr))
497b66f2 2616 goto new_slab; /* Slab failed checks. Next slab needed */
894b8788 2617
d4ff6d35 2618 deactivate_slab(s, page, get_freepointer(s, freelist), c);
6faa6833 2619 return freelist;
894b8788
CL
2620}
2621
a380a3c7
CL
2622/*
2623 * Another one that disables interrupts and compensates for possible
2624 * cpu changes by refetching the per cpu area pointer.
2625 */
2626static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2627 unsigned long addr, struct kmem_cache_cpu *c)
2628{
2629 void *p;
2630 unsigned long flags;
2631
2632 local_irq_save(flags);
2633#ifdef CONFIG_PREEMPT
2634 /*
2635 * We may have been preempted and rescheduled on a different
2636 * cpu before disabling interrupts. Need to reload cpu area
2637 * pointer.
2638 */
2639 c = this_cpu_ptr(s->cpu_slab);
2640#endif
2641
2642 p = ___slab_alloc(s, gfpflags, node, addr, c);
2643 local_irq_restore(flags);
2644 return p;
2645}
2646
894b8788
CL
2647/*
2648 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2649 * have the fastpath folded into their functions. So no function call
2650 * overhead for requests that can be satisfied on the fastpath.
2651 *
2652 * The fastpath works by first checking if the lockless freelist can be used.
2653 * If not then __slab_alloc is called for slow processing.
2654 *
2655 * Otherwise we can simply pick the next object from the lockless free list.
2656 */
2b847c3c 2657static __always_inline void *slab_alloc_node(struct kmem_cache *s,
ce71e27c 2658 gfp_t gfpflags, int node, unsigned long addr)
894b8788 2659{
03ec0ed5 2660 void *object;
dfb4f096 2661 struct kmem_cache_cpu *c;
57d437d2 2662 struct page *page;
8a5ec0ba 2663 unsigned long tid;
1f84260c 2664
8135be5a
VD
2665 s = slab_pre_alloc_hook(s, gfpflags);
2666 if (!s)
773ff60e 2667 return NULL;
8a5ec0ba 2668redo:
8a5ec0ba
CL
2669 /*
2670 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2671 * enabled. We may switch back and forth between cpus while
2672 * reading from one cpu area. That does not matter as long
2673 * as we end up on the original cpu again when doing the cmpxchg.
7cccd80b 2674 *
9aabf810
JK
2675 * We must ensure that tid and the per cpu slab data are retrieved
2676 * on the same cpu. With CONFIG_PREEMPT they could come from
2677 * different cpus, so we check that they match and retry if not.
8a5ec0ba 2678 */
9aabf810
JK
2679 do {
2680 tid = this_cpu_read(s->cpu_slab->tid);
2681 c = raw_cpu_ptr(s->cpu_slab);
859b7a0e
MR
2682 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2683 unlikely(tid != READ_ONCE(c->tid)));
9aabf810
JK
2684
2685 /*
2686 * Irqless object alloc/free algorithm used here depends on sequence
2687 * of fetching cpu_slab's data. tid should be fetched before anything
2688 * on c to guarantee that object and page associated with previous tid
2689 * won't be used with the current tid. If we fetch tid first, the object
2690 * and page could be ones associated with the next tid; in that case the
2691 * alloc/free request will simply fail and we will retry. So, no problem.
2692 */
2693 barrier();
8a5ec0ba 2694
8a5ec0ba
CL
2695 /*
2696 * The transaction ids are globally unique per cpu and per operation on
2697 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
2698 * occurs on the right processor and that there was no operation on the
2699 * linked list in between.
2700 */
8a5ec0ba 2701
9dfc6e68 2702 object = c->freelist;
57d437d2 2703 page = c->page;
8eae1492 2704 if (unlikely(!object || !node_match(page, node))) {
dfb4f096 2705 object = __slab_alloc(s, gfpflags, node, addr, c);
8eae1492
DH
2706 stat(s, ALLOC_SLOWPATH);
2707 } else {
0ad9500e
ED
2708 void *next_object = get_freepointer_safe(s, object);
2709
8a5ec0ba 2710 /*
25985edc 2711 * The cmpxchg will only match if there was no additional
8a5ec0ba
CL
2712 * operation and if we are on the right processor.
2713 *
d0e0ac97
CG
2714 * The cmpxchg does the following atomically (without lock
2715 * semantics!)
8a5ec0ba
CL
2716 * 1. Relocate first pointer to the current per cpu area.
2717 * 2. Verify that tid and freelist have not been changed
2718 * 3. If they were not changed replace tid and freelist
2719 *
d0e0ac97
CG
2720 * Since this is without lock semantics the protection is only
2721 * against code executing on this cpu *not* from access by
2722 * other cpus.
8a5ec0ba 2723 */
933393f5 2724 if (unlikely(!this_cpu_cmpxchg_double(
8a5ec0ba
CL
2725 s->cpu_slab->freelist, s->cpu_slab->tid,
2726 object, tid,
0ad9500e 2727 next_object, next_tid(tid)))) {
8a5ec0ba
CL
2728
2729 note_cmpxchg_failure("slab_alloc", s, tid);
2730 goto redo;
2731 }
0ad9500e 2732 prefetch_freepointer(s, next_object);
84e554e6 2733 stat(s, ALLOC_FASTPATH);
894b8788 2734 }
8a5ec0ba 2735
74e2134f 2736 if (unlikely(gfpflags & __GFP_ZERO) && object)
3b0efdfa 2737 memset(object, 0, s->object_size);
d07dbea4 2738
03ec0ed5 2739 slab_post_alloc_hook(s, gfpflags, 1, &object);
5a896d9e 2740
894b8788 2741 return object;
81819f0f
CL
2742}
2743
2b847c3c
EG
2744static __always_inline void *slab_alloc(struct kmem_cache *s,
2745 gfp_t gfpflags, unsigned long addr)
2746{
2747 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2748}
2749
81819f0f
CL
2750void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2751{
2b847c3c 2752 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
5b882be4 2753
d0e0ac97
CG
2754 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2755 s->size, gfpflags);
5b882be4
EGM
2756
2757 return ret;
81819f0f
CL
2758}
2759EXPORT_SYMBOL(kmem_cache_alloc);
2760
0f24f128 2761#ifdef CONFIG_TRACING
4a92379b
RK
2762void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2763{
2b847c3c 2764 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
4a92379b 2765 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
0116523c 2766 ret = kasan_kmalloc(s, ret, size, gfpflags);
4a92379b
RK
2767 return ret;
2768}
2769EXPORT_SYMBOL(kmem_cache_alloc_trace);
5b882be4
EGM
2770#endif
2771
81819f0f
CL
2772#ifdef CONFIG_NUMA
2773void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2774{
2b847c3c 2775 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
5b882be4 2776
ca2b84cb 2777 trace_kmem_cache_alloc_node(_RET_IP_, ret,
3b0efdfa 2778 s->object_size, s->size, gfpflags, node);
5b882be4
EGM
2779
2780 return ret;
81819f0f
CL
2781}
2782EXPORT_SYMBOL(kmem_cache_alloc_node);
81819f0f 2783
0f24f128 2784#ifdef CONFIG_TRACING
4a92379b 2785void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
5b882be4 2786 gfp_t gfpflags,
4a92379b 2787 int node, size_t size)
5b882be4 2788{
2b847c3c 2789 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
4a92379b
RK
2790
2791 trace_kmalloc_node(_RET_IP_, ret,
2792 size, s->size, gfpflags, node);
0316bec2 2793
0116523c 2794 ret = kasan_kmalloc(s, ret, size, gfpflags);
4a92379b 2795 return ret;
5b882be4 2796}
4a92379b 2797EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
5b882be4 2798#endif
5d1f57e4 2799#endif
5b882be4 2800
81819f0f 2801/*
94e4d712 2802 * Slow path handling. This may still be called frequently since objects
894b8788 2803 * have a longer lifetime than the cpu slabs in most processing loads.
81819f0f 2804 *
894b8788
CL
2805 * So we still attempt to reduce cache line usage. Just take the slab
2806 * lock and free the item. If there is no additional partial page
2807 * handling required then we can return immediately.
81819f0f 2808 */
894b8788 2809static void __slab_free(struct kmem_cache *s, struct page *page,
81084651
JDB
2810 void *head, void *tail, int cnt,
2811 unsigned long addr)
2812
81819f0f
CL
2813{
2814 void *prior;
2cfb7455 2815 int was_frozen;
2cfb7455
CL
2816 struct page new;
2817 unsigned long counters;
2818 struct kmem_cache_node *n = NULL;
61728d1e 2819 unsigned long uninitialized_var(flags);
81819f0f 2820
8a5ec0ba 2821 stat(s, FREE_SLOWPATH);
81819f0f 2822
19c7ff9e 2823 if (kmem_cache_debug(s) &&
282acb43 2824 !free_debug_processing(s, page, head, tail, cnt, addr))
80f08c19 2825 return;
6446faa2 2826
2cfb7455 2827 do {
837d678d
JK
2828 if (unlikely(n)) {
2829 spin_unlock_irqrestore(&n->list_lock, flags);
2830 n = NULL;
2831 }
2cfb7455
CL
2832 prior = page->freelist;
2833 counters = page->counters;
81084651 2834 set_freepointer(s, tail, prior);
2cfb7455
CL
2835 new.counters = counters;
2836 was_frozen = new.frozen;
81084651 2837 new.inuse -= cnt;
837d678d 2838 if ((!new.inuse || !prior) && !was_frozen) {
49e22585 2839
c65c1877 2840 if (kmem_cache_has_cpu_partial(s) && !prior) {
49e22585
CL
2841
2842 /*
d0e0ac97
CG
2843 * Slab was on no list before and will be
2844 * partially empty
2845 * We can defer the list move and instead
2846 * freeze it.
49e22585
CL
2847 */
2848 new.frozen = 1;
2849
c65c1877 2850 } else { /* Needs to be taken off a list */
49e22585 2851
b455def2 2852 n = get_node(s, page_to_nid(page));
49e22585
CL
2853 /*
2854 * Speculatively acquire the list_lock.
2855 * If the cmpxchg does not succeed then we may
2856 * drop the list_lock without any processing.
2857 *
2858 * Otherwise the list_lock will synchronize with
2859 * other processors updating the list of slabs.
2860 */
2861 spin_lock_irqsave(&n->list_lock, flags);
2862
2863 }
2cfb7455 2864 }
81819f0f 2865
2cfb7455
CL
2866 } while (!cmpxchg_double_slab(s, page,
2867 prior, counters,
81084651 2868 head, new.counters,
2cfb7455 2869 "__slab_free"));
81819f0f 2870
2cfb7455 2871 if (likely(!n)) {
49e22585
CL
2872
2873 /*
2874 * If we just froze the page then put it onto the
2875 * per cpu partial list.
2876 */
8028dcea 2877 if (new.frozen && !was_frozen) {
49e22585 2878 put_cpu_partial(s, page, 1);
8028dcea
AS
2879 stat(s, CPU_PARTIAL_FREE);
2880 }
49e22585 2881 /*
2cfb7455
CL
2882 * The list lock was not taken, therefore no list
2883 * activity is necessary.
2884 */
b455def2
L
2885 if (was_frozen)
2886 stat(s, FREE_FROZEN);
2887 return;
2888 }
81819f0f 2889
8a5b20ae 2890 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
837d678d
JK
2891 goto slab_empty;
2892
81819f0f 2893 /*
837d678d
JK
2894 * Objects left in the slab. If it was not on the partial list before
2895 * then add it.
81819f0f 2896 */
345c905d
JK
2897 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2898 if (kmem_cache_debug(s))
c65c1877 2899 remove_full(s, n, page);
837d678d
JK
2900 add_partial(n, page, DEACTIVATE_TO_TAIL);
2901 stat(s, FREE_ADD_PARTIAL);
8ff12cfc 2902 }
80f08c19 2903 spin_unlock_irqrestore(&n->list_lock, flags);
81819f0f
CL
2904 return;
2905
2906slab_empty:
a973e9dd 2907 if (prior) {
81819f0f 2908 /*
6fbabb20 2909 * Slab on the partial list.
81819f0f 2910 */
5cc6eee8 2911 remove_partial(n, page);
84e554e6 2912 stat(s, FREE_REMOVE_PARTIAL);
c65c1877 2913 } else {
6fbabb20 2914 /* Slab must be on the full list */
c65c1877
PZ
2915 remove_full(s, n, page);
2916 }
2cfb7455 2917
80f08c19 2918 spin_unlock_irqrestore(&n->list_lock, flags);
84e554e6 2919 stat(s, FREE_SLAB);
81819f0f 2920 discard_slab(s, page);
81819f0f
CL
2921}
2922
894b8788
CL
2923/*
2924 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2925 * can perform fastpath freeing without additional function calls.
2926 *
2927 * The fastpath is only possible if we are freeing to the current cpu slab
2929 * of this processor. This is typically the case if we have just allocated
2929 * the item before.
2930 *
2931 * If fastpath is not possible then fall back to __slab_free where we deal
2932 * with all sorts of special processing.
81084651
JDB
2933 *
2934 * Bulk free of a freelist with several objects (all pointing to the
2935 * same page) is possible by specifying head and tail ptr, plus objects
2936 * count (cnt). Bulk free is indicated by the tail pointer being set.
894b8788 2937 */
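/*
 * Illustrative usage (names hypothetical, matching the callers below):
 * a single free passes the object as head with tail == NULL and
 * cnt == 1, e.g. kmem_cache_free() does
 *	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
 * while a bulk free passes an already linked chain, e.g. a detached
 * freelist of three objects chained through their free pointers is
 * freed with
 *	slab_free(s, page, first, last, 3, _RET_IP_);
 */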
80a9201a
AP
2938static __always_inline void do_slab_free(struct kmem_cache *s,
2939 struct page *page, void *head, void *tail,
2940 int cnt, unsigned long addr)
894b8788 2941{
81084651 2942 void *tail_obj = tail ? : head;
dfb4f096 2943 struct kmem_cache_cpu *c;
8a5ec0ba 2944 unsigned long tid;
8a5ec0ba
CL
2945redo:
2946 /*
2947 * Determine the current cpu's per cpu slab.
2948 * The cpu may change afterward. However that does not matter since
2949 * data is retrieved via this pointer. If we are on the same cpu
2ae44005 2950 * during the cmpxchg then the free will succeed.
8a5ec0ba 2951 */
9aabf810
JK
2952 do {
2953 tid = this_cpu_read(s->cpu_slab->tid);
2954 c = raw_cpu_ptr(s->cpu_slab);
859b7a0e
MR
2955 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2956 unlikely(tid != READ_ONCE(c->tid)));
c016b0bd 2957
9aabf810
JK
2958 /* Same with comment on barrier() in slab_alloc_node() */
2959 barrier();
c016b0bd 2960
442b06bc 2961 if (likely(page == c->page)) {
81084651 2962 set_freepointer(s, tail_obj, c->freelist);
8a5ec0ba 2963
933393f5 2964 if (unlikely(!this_cpu_cmpxchg_double(
8a5ec0ba
CL
2965 s->cpu_slab->freelist, s->cpu_slab->tid,
2966 c->freelist, tid,
81084651 2967 head, next_tid(tid)))) {
8a5ec0ba
CL
2968
2969 note_cmpxchg_failure("slab_free", s, tid);
2970 goto redo;
2971 }
84e554e6 2972 stat(s, FREE_FASTPATH);
894b8788 2973 } else
81084651 2974 __slab_free(s, page, head, tail_obj, cnt, addr);
894b8788 2975
894b8788
CL
2976}
2977
80a9201a
AP
2978static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
2979 void *head, void *tail, int cnt,
2980 unsigned long addr)
2981{
80a9201a 2982 /*
c3895391
AK
2983 * With KASAN enabled slab_free_freelist_hook modifies the freelist
2984 * to remove objects, whose reuse must be delayed.
80a9201a 2985 */
c3895391
AK
2986 if (slab_free_freelist_hook(s, &head, &tail))
2987 do_slab_free(s, page, head, tail, cnt, addr);
80a9201a
AP
2988}
2989
2bd926b4 2990#ifdef CONFIG_KASAN_GENERIC
80a9201a
AP
2991void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
2992{
2993 do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
2994}
2995#endif
2996
81819f0f
CL
2997void kmem_cache_free(struct kmem_cache *s, void *x)
2998{
b9ce5ef4
GC
2999 s = cache_from_obj(s, x);
3000 if (!s)
79576102 3001 return;
81084651 3002 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
ca2b84cb 3003 trace_kmem_cache_free(_RET_IP_, x);
81819f0f
CL
3004}
3005EXPORT_SYMBOL(kmem_cache_free);
3006
d0ecd894 3007struct detached_freelist {
fbd02630 3008 struct page *page;
d0ecd894
JDB
3009 void *tail;
3010 void *freelist;
3011 int cnt;
376bf125 3012 struct kmem_cache *s;
d0ecd894 3013};
fbd02630 3014
d0ecd894
JDB
3015/*
3016 * This function progressively scans the array of free objects (with
3017 * a limited look ahead) and extracts objects belonging to the same
3018 * page. It builds a detached freelist directly within the given
3019 * page/objects. This can happen without any need for
3020 * synchronization, because the objects are owned by the running process.
3021 * The freelist is built up as a single linked list in the objects.
3022 * The idea is that this detached freelist can then be bulk
3023 * transferred to the real freelist(s), but only requiring a single
3024 * synchronization primitive. Look ahead in the array is limited due
3025 * to performance reasons.
3026 */
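/*
 * Worked example (illustrative only, object names hypothetical): for
 * p[] = { A1, B1, A2, A3 } where the A objects share one slab page and
 * B1 lives on another, a single call scans from the end, anchors df on
 * A3's page, builds the chain A1 -> A2 -> A3 (freelist head A1, tail
 * A3, cnt 3) while skipping B1 inside the lookahead window, NULLs the
 * processed slots and returns the index just past B1 so that the next
 * iteration of kmem_cache_free_bulk() picks B1 up.
 */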
376bf125
JDB
3027static inline
3028int build_detached_freelist(struct kmem_cache *s, size_t size,
3029 void **p, struct detached_freelist *df)
d0ecd894
JDB
3030{
3031 size_t first_skipped_index = 0;
3032 int lookahead = 3;
3033 void *object;
ca257195 3034 struct page *page;
fbd02630 3035
d0ecd894
JDB
3036 /* Always re-init detached_freelist */
3037 df->page = NULL;
fbd02630 3038
d0ecd894
JDB
3039 do {
3040 object = p[--size];
ca257195 3041 /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
d0ecd894 3042 } while (!object && size);
3eed034d 3043
d0ecd894
JDB
3044 if (!object)
3045 return 0;
fbd02630 3046
ca257195
JDB
3047 page = virt_to_head_page(object);
3048 if (!s) {
3049 /* Handle kmalloc'ed objects */
3050 if (unlikely(!PageSlab(page))) {
3051 BUG_ON(!PageCompound(page));
3052 kfree_hook(object);
4949148a 3053 __free_pages(page, compound_order(page));
ca257195
JDB
3054 p[size] = NULL; /* mark object processed */
3055 return size;
3056 }
3057 /* Derive kmem_cache from object */
3058 df->s = page->slab_cache;
3059 } else {
3060 df->s = cache_from_obj(s, object); /* Support for memcg */
3061 }
376bf125 3062
d0ecd894 3063 /* Start new detached freelist */
ca257195 3064 df->page = page;
376bf125 3065 set_freepointer(df->s, object, NULL);
d0ecd894
JDB
3066 df->tail = object;
3067 df->freelist = object;
3068 p[size] = NULL; /* mark object processed */
3069 df->cnt = 1;
3070
3071 while (size) {
3072 object = p[--size];
3073 if (!object)
3074 continue; /* Skip processed objects */
3075
3076 /* df->page is always set at this point */
3077 if (df->page == virt_to_head_page(object)) {
3078 /* Opportunity build freelist */
376bf125 3079 set_freepointer(df->s, object, df->freelist);
d0ecd894
JDB
3080 df->freelist = object;
3081 df->cnt++;
3082 p[size] = NULL; /* mark object processed */
3083
3084 continue;
fbd02630 3085 }
d0ecd894
JDB
3086
3087 /* Limit look ahead search */
3088 if (!--lookahead)
3089 break;
3090
3091 if (!first_skipped_index)
3092 first_skipped_index = size + 1;
fbd02630 3093 }
d0ecd894
JDB
3094
3095 return first_skipped_index;
3096}
3097
d0ecd894 3098/* Note that interrupts must be enabled when calling this function. */
376bf125 3099void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
d0ecd894
JDB
3100{
3101 if (WARN_ON(!size))
3102 return;
3103
3104 do {
3105 struct detached_freelist df;
3106
3107 size = build_detached_freelist(s, size, p, &df);
84582c8a 3108 if (!df.page)
d0ecd894
JDB
3109 continue;
3110
376bf125 3111 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
d0ecd894 3112 } while (likely(size));
484748f0
CL
3113}
3114EXPORT_SYMBOL(kmem_cache_free_bulk);
3115
994eb764 3116/* Note that interrupts must be enabled when calling this function. */
865762a8
JDB
3117int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3118 void **p)
484748f0 3119{
994eb764
JDB
3120 struct kmem_cache_cpu *c;
3121 int i;
3122
03ec0ed5
JDB
3123 /* memcg and kmem_cache debug support */
3124 s = slab_pre_alloc_hook(s, flags);
3125 if (unlikely(!s))
3126 return false;
994eb764
JDB
3127 /*
3128 * Drain objects in the per cpu slab, while disabling local
3129 * IRQs, which protects against PREEMPT and interrupt
3130 * handlers invoking normal fastpath.
3131 */
3132 local_irq_disable();
3133 c = this_cpu_ptr(s->cpu_slab);
3134
3135 for (i = 0; i < size; i++) {
3136 void *object = c->freelist;
3137
ebe909e0 3138 if (unlikely(!object)) {
ebe909e0
JDB
3139 /*
3140 * Invoking the slow path likely has the side-effect
3141 * of re-populating per CPU c->freelist
3142 */
87098373 3143 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
ebe909e0 3144 _RET_IP_, c);
87098373
CL
3145 if (unlikely(!p[i]))
3146 goto error;
3147
ebe909e0
JDB
3148 c = this_cpu_ptr(s->cpu_slab);
3149 continue; /* goto for-loop */
3150 }
994eb764
JDB
3151 c->freelist = get_freepointer(s, object);
3152 p[i] = object;
3153 }
3154 c->tid = next_tid(c->tid);
3155 local_irq_enable();
3156
3157 /* Clear memory outside IRQ disabled fastpath loop */
3158 if (unlikely(flags & __GFP_ZERO)) {
3159 int j;
3160
3161 for (j = 0; j < i; j++)
3162 memset(p[j], 0, s->object_size);
3163 }
3164
03ec0ed5
JDB
3165 /* memcg and kmem_cache debug support */
3166 slab_post_alloc_hook(s, flags, size, p);
865762a8 3167 return i;
87098373 3168error:
87098373 3169 local_irq_enable();
03ec0ed5
JDB
3170 slab_post_alloc_hook(s, flags, i, p);
3171 __kmem_cache_free_bulk(s, i, p);
865762a8 3172 return 0;
484748f0
CL
3173}
3174EXPORT_SYMBOL(kmem_cache_alloc_bulk);
3175
3176
81819f0f 3177/*
672bba3a
CL
3178 * Object placement in a slab is made very easy because we always start at
3179 * offset 0. If we tune the size of the object to the alignment then we can
3180 * get the required alignment by putting one properly sized object after
3181 * another.
81819f0f
CL
3182 *
3183 * Notice that the allocation order determines the sizes of the per cpu
3184 * caches. Each processor has always one slab available for allocations.
3185 * Increasing the allocation order reduces the number of times that slabs
672bba3a 3186 * must be moved on and off the partial lists and is therefore a factor in
81819f0f 3187 * locking overhead.
81819f0f
CL
3188 */
3189
3190/*
3191 * Minimum / Maximum order of slab pages. This influences locking overhead
3192 * and slab fragmentation. A higher order reduces the number of partial slabs
3193 * and increases the number of allocations possible without having to
3194 * take the list_lock.
3195 */
19af27af
AD
3196static unsigned int slub_min_order;
3197static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3198static unsigned int slub_min_objects;
81819f0f 3199
81819f0f
CL
3200/*
3201 * Calculate the order of allocation given a slab object size.
3202 *
672bba3a
CL
3203 * The order of allocation has significant impact on performance and other
3204 * system components. Generally order 0 allocations should be preferred since
3205 * order 0 does not cause fragmentation in the page allocator. Larger objects
3206 * can be problematic to put into order 0 slabs because there may be too much
c124f5b5 3207 * unused space left. We go to a higher order if more than 1/16th of the slab
672bba3a
CL
3208 * would be wasted.
3209 *
3210 * In order to reach satisfactory performance we must ensure that a minimum
3211 * number of objects is in one slab. Otherwise we may generate too much
3212 * activity on the partial lists which requires taking the list_lock. This is
3213 * less a concern for large slabs though which are rarely used.
81819f0f 3214 *
672bba3a
CL
3215 * slub_max_order specifies the order where we begin to stop considering the
3216 * number of objects in a slab as critical. If we reach slub_max_order then
3217 * we try to keep the page order as low as possible. So we accept more waste
3218 * of space in favor of a small page order.
81819f0f 3219 *
672bba3a
CL
3220 * Higher order allocations also allow the placement of more objects in a
3221 * slab and thereby reduce object handling overhead. If the user has
3222 * requested a higher minimum order then we start with that one instead of
3223 * the smallest order which will fit the object.
81819f0f 3224 */
19af27af
AD
3225static inline unsigned int slab_order(unsigned int size,
3226 unsigned int min_objects, unsigned int max_order,
9736d2a9 3227 unsigned int fract_leftover)
81819f0f 3228{
19af27af
AD
3229 unsigned int min_order = slub_min_order;
3230 unsigned int order;
81819f0f 3231
9736d2a9 3232 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
210b5c06 3233 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
39b26464 3234
9736d2a9 3235 for (order = max(min_order, (unsigned int)get_order(min_objects * size));
5e6d444e 3236 order <= max_order; order++) {
81819f0f 3237
19af27af
AD
3238 unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
3239 unsigned int rem;
81819f0f 3240
9736d2a9 3241 rem = slab_size % size;
81819f0f 3242
5e6d444e 3243 if (rem <= slab_size / fract_leftover)
81819f0f 3244 break;
81819f0f 3245 }
672bba3a 3246
81819f0f
CL
3247 return order;
3248}
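/*
 * Worked example (illustrative only, assuming PAGE_SIZE == 4096 and
 * ignoring the min_objects driven starting order): for a 700 byte
 * object with fract_leftover == 16, order 0 leaves 4096 % 700 == 596
 * unused bytes, more than 4096 / 16 == 256, so it is rejected; order 1
 * leaves 8192 % 700 == 492 bytes, within 8192 / 16 == 512, so the loop
 * above stops at order 1 with 11 objects per slab.
 */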
3249
9736d2a9 3250static inline int calculate_order(unsigned int size)
5e6d444e 3251{
19af27af
AD
3252 unsigned int order;
3253 unsigned int min_objects;
3254 unsigned int max_objects;
5e6d444e
CL
3255
3256 /*
3257 * Attempt to find best configuration for a slab. This
3258 * works by first attempting to generate a layout with
3259 * the best configuration and backing off gradually.
3260 *
422ff4d7 3261 * First we increase the acceptable waste in a slab. Then
5e6d444e
CL
3262 * we reduce the minimum objects required in a slab.
3263 */
3264 min_objects = slub_min_objects;
9b2cd506
CL
3265 if (!min_objects)
3266 min_objects = 4 * (fls(nr_cpu_ids) + 1);
9736d2a9 3267 max_objects = order_objects(slub_max_order, size);
e8120ff1
ZY
3268 min_objects = min(min_objects, max_objects);
3269
5e6d444e 3270 while (min_objects > 1) {
19af27af
AD
3271 unsigned int fraction;
3272
c124f5b5 3273 fraction = 16;
5e6d444e
CL
3274 while (fraction >= 4) {
3275 order = slab_order(size, min_objects,
9736d2a9 3276 slub_max_order, fraction);
5e6d444e
CL
3277 if (order <= slub_max_order)
3278 return order;
3279 fraction /= 2;
3280 }
5086c389 3281 min_objects--;
5e6d444e
CL
3282 }
3283
3284 /*
3285 * We were unable to place multiple objects in a slab. Now
3286 * let's see if we can place a single object there.
3287 */
9736d2a9 3288 order = slab_order(size, 1, slub_max_order, 1);
5e6d444e
CL
3289 if (order <= slub_max_order)
3290 return order;
3291
3292 /*
3293 * Doh this slab cannot be placed using slub_max_order.
3294 */
9736d2a9 3295 order = slab_order(size, 1, MAX_ORDER, 1);
818cf590 3296 if (order < MAX_ORDER)
5e6d444e
CL
3297 return order;
3298 return -ENOSYS;
3299}
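/*
 * Worked example (illustrative only): on a machine with 16 possible
 * cpus and no slub_min_objects= override, min_objects starts at
 * 4 * (fls(16) + 1) == 24, capped by what slub_max_order can hold.
 * calculate_order() then tries fractions 16, 8 and 4 for 24 objects,
 * then for 23, 22, ... down to 2 objects, and only then falls back to
 * fitting a single object, accepting more wasted space at each step.
 */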
3300
5595cffc 3301static void
4053497d 3302init_kmem_cache_node(struct kmem_cache_node *n)
81819f0f
CL
3303{
3304 n->nr_partial = 0;
81819f0f
CL
3305 spin_lock_init(&n->list_lock);
3306 INIT_LIST_HEAD(&n->partial);
8ab1372f 3307#ifdef CONFIG_SLUB_DEBUG
0f389ec6 3308 atomic_long_set(&n->nr_slabs, 0);
02b71b70 3309 atomic_long_set(&n->total_objects, 0);
643b1138 3310 INIT_LIST_HEAD(&n->full);
8ab1372f 3311#endif
81819f0f
CL
3312}
3313
55136592 3314static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
4c93c355 3315{
6c182dc0 3316 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
95a05b42 3317 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
4c93c355 3318
8a5ec0ba 3319 /*
d4d84fef
CM
3320 * Must align to double word boundary for the double cmpxchg
3321 * instructions to work; see __pcpu_double_call_return_bool().
8a5ec0ba 3322 */
d4d84fef
CM
3323 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3324 2 * sizeof(void *));
8a5ec0ba
CL
3325
3326 if (!s->cpu_slab)
3327 return 0;
3328
3329 init_kmem_cache_cpus(s);
4c93c355 3330
8a5ec0ba 3331 return 1;
4c93c355 3332}
4c93c355 3333
51df1142
CL
3334static struct kmem_cache *kmem_cache_node;
3335
81819f0f
CL
3336/*
3337 * No kmalloc_node yet so do it by hand. We know that this is the first
3338 * slab on the node for this slabcache. There are no concurrent accesses
3339 * possible.
3340 *
721ae22a
ZYW
3341 * Note that this function only works on the kmem_cache_node
3342 * when allocating for the kmem_cache_node. This is used for bootstrapping
4c93c355 3343 * memory on a fresh node that has no slab structures yet.
81819f0f 3344 */
55136592 3345static void early_kmem_cache_node_alloc(int node)
81819f0f
CL
3346{
3347 struct page *page;
3348 struct kmem_cache_node *n;
3349
51df1142 3350 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
81819f0f 3351
51df1142 3352 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
81819f0f
CL
3353
3354 BUG_ON(!page);
a2f92ee7 3355 if (page_to_nid(page) != node) {
f9f58285
FF
3356 pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3357 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
a2f92ee7
CL
3358 }
3359
81819f0f
CL
3360 n = page->freelist;
3361 BUG_ON(!n);
8ab1372f 3362#ifdef CONFIG_SLUB_DEBUG
f7cb1933 3363 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
51df1142 3364 init_tracking(kmem_cache_node, n);
8ab1372f 3365#endif
12b22386 3366 n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
505f5dcb 3367 GFP_KERNEL);
12b22386
AK
3368 page->freelist = get_freepointer(kmem_cache_node, n);
3369 page->inuse = 1;
3370 page->frozen = 0;
3371 kmem_cache_node->node[node] = n;
4053497d 3372 init_kmem_cache_node(n);
51df1142 3373 inc_slabs_node(kmem_cache_node, node, page->objects);
6446faa2 3374
67b6c900 3375 /*
1e4dd946
SR
3376 * No locks need to be taken here as it has just been
3377 * initialized and there is no concurrent access.
67b6c900 3378 */
1e4dd946 3379 __add_partial(n, page, DEACTIVATE_TO_HEAD);
81819f0f
CL
3380}
3381
3382static void free_kmem_cache_nodes(struct kmem_cache *s)
3383{
3384 int node;
fa45dc25 3385 struct kmem_cache_node *n;
81819f0f 3386
fa45dc25 3387 for_each_kmem_cache_node(s, node, n) {
81819f0f 3388 s->node[node] = NULL;
ea37df54 3389 kmem_cache_free(kmem_cache_node, n);
81819f0f
CL
3390 }
3391}
3392
52b4b950
DS
3393void __kmem_cache_release(struct kmem_cache *s)
3394{
210e7a43 3395 cache_random_seq_destroy(s);
52b4b950
DS
3396 free_percpu(s->cpu_slab);
3397 free_kmem_cache_nodes(s);
3398}
3399
55136592 3400static int init_kmem_cache_nodes(struct kmem_cache *s)
81819f0f
CL
3401{
3402 int node;
81819f0f 3403
f64dc58c 3404 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
3405 struct kmem_cache_node *n;
3406
73367bd8 3407 if (slab_state == DOWN) {
55136592 3408 early_kmem_cache_node_alloc(node);
73367bd8
AD
3409 continue;
3410 }
51df1142 3411 n = kmem_cache_alloc_node(kmem_cache_node,
55136592 3412 GFP_KERNEL, node);
81819f0f 3413
73367bd8
AD
3414 if (!n) {
3415 free_kmem_cache_nodes(s);
3416 return 0;
81819f0f 3417 }
73367bd8 3418
4053497d 3419 init_kmem_cache_node(n);
ea37df54 3420 s->node[node] = n;
81819f0f
CL
3421 }
3422 return 1;
3423}
81819f0f 3424
c0bdb232 3425static void set_min_partial(struct kmem_cache *s, unsigned long min)
3b89d7d8
DR
3426{
3427 if (min < MIN_PARTIAL)
3428 min = MIN_PARTIAL;
3429 else if (min > MAX_PARTIAL)
3430 min = MAX_PARTIAL;
3431 s->min_partial = min;
3432}
3433
e6d0e1dc
WY
3434static void set_cpu_partial(struct kmem_cache *s)
3435{
3436#ifdef CONFIG_SLUB_CPU_PARTIAL
3437 /*
3438	 * cpu_partial determines the maximum number of objects kept in the
3439 * per cpu partial lists of a processor.
3440 *
3441 * Per cpu partial lists mainly contain slabs that just have one
3442 * object freed. If they are used for allocation then they can be
3443 * filled up again with minimal effort. The slab will never hit the
3444 * per node partial lists and therefore no locking will be required.
3445 *
3446 * This setting also determines
3447 *
3448 * A) The number of objects from per cpu partial slabs dumped to the
3449 * per node list when we reach the limit.
3450 * B) The number of objects in cpu partial slabs to extract from the
3451 * per node list when we run out of per cpu objects. We only fetch
3452 * 50% to keep some capacity around for frees.
3453 */
3454 if (!kmem_cache_has_cpu_partial(s))
3455 s->cpu_partial = 0;
3456 else if (s->size >= PAGE_SIZE)
3457 s->cpu_partial = 2;
3458 else if (s->size >= 1024)
3459 s->cpu_partial = 6;
3460 else if (s->size >= 256)
3461 s->cpu_partial = 13;
3462 else
3463 s->cpu_partial = 30;
3464#endif
3465}
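/*
 * Added commentary, not in the original source: the thresholds above scale
 * the per cpu partial limit inversely with object size, so a cache of large
 * objects pins far less memory in per cpu partial slabs than a cache of
 * tiny objects does.
 */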
3466
81819f0f
CL
3467/*
3468 * calculate_sizes() determines the order and the distribution of data within
3469 * a slab object.
3470 */
06b285dc 3471static int calculate_sizes(struct kmem_cache *s, int forced_order)
81819f0f 3472{
d50112ed 3473 slab_flags_t flags = s->flags;
be4a7988 3474 unsigned int size = s->object_size;
19af27af 3475 unsigned int order;
81819f0f 3476
d8b42bf5
CL
3477 /*
3478 * Round up object size to the next word boundary. We can only
3479 * place the free pointer at word boundaries and this determines
3480 * the possible location of the free pointer.
3481 */
3482 size = ALIGN(size, sizeof(void *));
3483
3484#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
3485 /*
3486 * Determine if we can poison the object itself. If the user of
3487 * the slab may touch the object after free or before allocation
3488 * then we should never poison the object itself.
3489 */
5f0d5a3a 3490 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
c59def9f 3491 !s->ctor)
81819f0f
CL
3492 s->flags |= __OBJECT_POISON;
3493 else
3494 s->flags &= ~__OBJECT_POISON;
3495
81819f0f
CL
3496
3497 /*
672bba3a 3498 * If we are Redzoning then check if there is some space between the
81819f0f 3499 * end of the object and the free pointer. If not then add an
672bba3a 3500 * additional word to have some bytes to store Redzone information.
81819f0f 3501 */
3b0efdfa 3502 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
81819f0f 3503 size += sizeof(void *);
41ecc55b 3504#endif
81819f0f
CL
3505
3506 /*
672bba3a
CL
3507 * With that we have determined the number of bytes in actual use
3508 * by the object. This is the potential offset to the free pointer.
81819f0f
CL
3509 */
3510 s->inuse = size;
3511
5f0d5a3a 3512 if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
c59def9f 3513 s->ctor)) {
81819f0f
CL
3514 /*
3515 * Relocate free pointer after the object if it is not
3516 * permitted to overwrite the first word of the object on
3517 * kmem_cache_free.
3518 *
3519 * This is the case if we do RCU, have a constructor or
3520 * destructor or are poisoning the objects.
3521 */
3522 s->offset = size;
3523 size += sizeof(void *);
3524 }
3525
c12b3c62 3526#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
3527 if (flags & SLAB_STORE_USER)
3528 /*
3529 * Need to store information about allocs and frees after
3530 * the object.
3531 */
3532 size += 2 * sizeof(struct track);
80a9201a 3533#endif
81819f0f 3534
80a9201a
AP
3535 kasan_cache_create(s, &size, &s->flags);
3536#ifdef CONFIG_SLUB_DEBUG
d86bd1be 3537 if (flags & SLAB_RED_ZONE) {
81819f0f
CL
3538 /*
3539 * Add some empty padding so that we can catch
3540 * overwrites from earlier objects rather than let
3541 * tracking information or the free pointer be
0211a9c8 3542 * corrupted if a user writes before the start
81819f0f
CL
3543 * of the object.
3544 */
3545 size += sizeof(void *);
d86bd1be
JK
3546
3547 s->red_left_pad = sizeof(void *);
3548 s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3549 size += s->red_left_pad;
3550 }
41ecc55b 3551#endif
672bba3a 3552
81819f0f
CL
3553 /*
3554 * SLUB stores one object immediately after another beginning from
3555	 * offset 0. In order to align the objects we simply round the size
3556	 * of each object up to the required alignment.
3557 */
45906855 3558 size = ALIGN(size, s->align);
81819f0f 3559 s->size = size;
06b285dc
CL
3560 if (forced_order >= 0)
3561 order = forced_order;
3562 else
9736d2a9 3563 order = calculate_order(size);
81819f0f 3564
19af27af 3565 if ((int)order < 0)
81819f0f
CL
3566 return 0;
3567
b7a49f0d 3568 s->allocflags = 0;
834f3d11 3569 if (order)
b7a49f0d
CL
3570 s->allocflags |= __GFP_COMP;
3571
3572 if (s->flags & SLAB_CACHE_DMA)
2c59dd65 3573 s->allocflags |= GFP_DMA;
b7a49f0d
CL
3574
3575 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3576 s->allocflags |= __GFP_RECLAIMABLE;
3577
81819f0f
CL
3578 /*
3579 * Determine the number of objects per slab
3580 */
9736d2a9
MW
3581 s->oo = oo_make(order, size);
3582 s->min = oo_make(get_order(size), size);
205ab99d
CL
3583 if (oo_objects(s->oo) > oo_objects(s->max))
3584 s->max = s->oo;
81819f0f 3585
834f3d11 3586 return !!oo_objects(s->oo);
81819f0f
CL
3587}
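/*
 * Added commentary, not in the original source: with all debug options
 * enabled the layout computed above is roughly (illustrative only, exact
 * padding depends on the flags and on s->align):
 *
 *   [left red zone][object_size bytes][right red zone word]
 *   [free pointer][alloc track][free track][padding to s->align]
 *
 * In the common production case the free pointer instead overlays the
 * first word of a free object and s->offset stays 0.
 */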
3588
d50112ed 3589static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
81819f0f 3590{
8a13a4cc 3591 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
2482ddec
KC
3592#ifdef CONFIG_SLAB_FREELIST_HARDENED
3593 s->random = get_random_long();
3594#endif
81819f0f 3595
06b285dc 3596 if (!calculate_sizes(s, -1))
81819f0f 3597 goto error;
3de47213
DR
3598 if (disable_higher_order_debug) {
3599 /*
3600 * Disable debugging flags that store metadata if the min slab
3601 * order increased.
3602 */
3b0efdfa 3603 if (get_order(s->size) > get_order(s->object_size)) {
3de47213
DR
3604 s->flags &= ~DEBUG_METADATA_FLAGS;
3605 s->offset = 0;
3606 if (!calculate_sizes(s, -1))
3607 goto error;
3608 }
3609 }
81819f0f 3610
2565409f
HC
3611#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3612 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
149daaf3 3613 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
b789ef51
CL
3614 /* Enable fast mode */
3615 s->flags |= __CMPXCHG_DOUBLE;
3616#endif
3617
3b89d7d8
DR
3618 /*
3619 * The larger the object size is, the more pages we want on the partial
3620 * list to avoid pounding the page allocator excessively.
3621 */
49e22585
CL
3622 set_min_partial(s, ilog2(s->size) / 2);
3623
e6d0e1dc 3624 set_cpu_partial(s);
49e22585 3625
81819f0f 3626#ifdef CONFIG_NUMA
e2cb96b7 3627 s->remote_node_defrag_ratio = 1000;
81819f0f 3628#endif
210e7a43
TG
3629
3630 /* Initialize the pre-computed randomized freelist if slab is up */
3631 if (slab_state >= UP) {
3632 if (init_cache_random_seq(s))
3633 goto error;
3634 }
3635
55136592 3636 if (!init_kmem_cache_nodes(s))
dfb4f096 3637 goto error;
81819f0f 3638
55136592 3639 if (alloc_kmem_cache_cpus(s))
278b1bb1 3640 return 0;
ff12059e 3641
4c93c355 3642 free_kmem_cache_nodes(s);
81819f0f
CL
3643error:
3644 if (flags & SLAB_PANIC)
44065b2e
AD
3645 panic("Cannot create slab %s size=%u realsize=%u order=%u offset=%u flags=%lx\n",
3646 s->name, s->size, s->size,
4fd0b46e 3647 oo_order(s->oo), s->offset, (unsigned long)flags);
278b1bb1 3648 return -EINVAL;
81819f0f 3649}
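/*
 * Added commentary, not in the original source: kmem_cache_open() wires a
 * cache up in this order: compute the layout (calculate_sizes), pick
 * min_partial and the per cpu partial limit, seed the randomized freelist
 * when the allocator is far enough up, then allocate the per node and per
 * cpu structures. Any failure unwinds what was set up and returns -EINVAL,
 * panicking instead if SLAB_PANIC was requested.
 */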
81819f0f 3650
33b12c38
CL
3651static void list_slab_objects(struct kmem_cache *s, struct page *page,
3652 const char *text)
3653{
3654#ifdef CONFIG_SLUB_DEBUG
3655 void *addr = page_address(page);
3656 void *p;
0684e652 3657 unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
bbd7d57b
ED
3658 if (!map)
3659 return;
945cf2b6 3660 slab_err(s, page, text, s->name);
33b12c38 3661 slab_lock(page);
33b12c38 3662
5f80b13a 3663 get_map(s, page, map);
33b12c38
CL
3664 for_each_object(p, s, addr, page->objects) {
3665
3666 if (!test_bit(slab_index(p, s, addr), map)) {
f9f58285 3667 pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
33b12c38
CL
3668 print_tracking(s, p);
3669 }
3670 }
3671 slab_unlock(page);
0684e652 3672 bitmap_free(map);
33b12c38
CL
3673#endif
3674}
3675
81819f0f 3676/*
599870b1 3677 * Attempt to free all partial slabs on a node.
52b4b950
DS
3678 * This is called from __kmem_cache_shutdown(). We must take list_lock
3679	 * because sysfs files might still access the partial list after shutdown.
81819f0f 3680 */
599870b1 3681static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
81819f0f 3682{
60398923 3683 LIST_HEAD(discard);
81819f0f
CL
3684 struct page *page, *h;
3685
52b4b950
DS
3686 BUG_ON(irqs_disabled());
3687 spin_lock_irq(&n->list_lock);
33b12c38 3688 list_for_each_entry_safe(page, h, &n->partial, lru) {
81819f0f 3689 if (!page->inuse) {
52b4b950 3690 remove_partial(n, page);
60398923 3691 list_add(&page->lru, &discard);
33b12c38
CL
3692 } else {
3693 list_slab_objects(s, page,
52b4b950 3694 "Objects remaining in %s on __kmem_cache_shutdown()");
599870b1 3695 }
33b12c38 3696 }
52b4b950 3697 spin_unlock_irq(&n->list_lock);
60398923
CW
3698
3699 list_for_each_entry_safe(page, h, &discard, lru)
3700 discard_slab(s, page);
81819f0f
CL
3701}
3702
f9e13c0a
SB
3703bool __kmem_cache_empty(struct kmem_cache *s)
3704{
3705 int node;
3706 struct kmem_cache_node *n;
3707
3708 for_each_kmem_cache_node(s, node, n)
3709 if (n->nr_partial || slabs_node(s, node))
3710 return false;
3711 return true;
3712}
3713
81819f0f 3714/*
672bba3a 3715 * Release all resources used by a slab cache.
81819f0f 3716 */
52b4b950 3717int __kmem_cache_shutdown(struct kmem_cache *s)
81819f0f
CL
3718{
3719 int node;
fa45dc25 3720 struct kmem_cache_node *n;
81819f0f
CL
3721
3722 flush_all(s);
81819f0f 3723 /* Attempt to free all objects */
fa45dc25 3724 for_each_kmem_cache_node(s, node, n) {
599870b1
CL
3725 free_partial(s, n);
3726 if (n->nr_partial || slabs_node(s, node))
81819f0f
CL
3727 return 1;
3728 }
bf5eb3de 3729 sysfs_slab_remove(s);
81819f0f
CL
3730 return 0;
3731}
3732
81819f0f
CL
3733/********************************************************************
3734 * Kmalloc subsystem
3735 *******************************************************************/
3736
81819f0f
CL
3737static int __init setup_slub_min_order(char *str)
3738{
19af27af 3739 get_option(&str, (int *)&slub_min_order);
81819f0f
CL
3740
3741 return 1;
3742}
3743
3744__setup("slub_min_order=", setup_slub_min_order);
3745
3746static int __init setup_slub_max_order(char *str)
3747{
19af27af
AD
3748 get_option(&str, (int *)&slub_max_order);
3749 slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
81819f0f
CL
3750
3751 return 1;
3752}
3753
3754__setup("slub_max_order=", setup_slub_max_order);
3755
3756static int __init setup_slub_min_objects(char *str)
3757{
19af27af 3758 get_option(&str, (int *)&slub_min_objects);
81819f0f
CL
3759
3760 return 1;
3761}
3762
3763__setup("slub_min_objects=", setup_slub_min_objects);
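/*
 * Illustrative usage, not part of the original source: the three handlers
 * above parse kernel command line options such as
 *
 *   slub_min_order=1 slub_max_order=4 slub_min_objects=16
 *
 * which tune the slab order calculation at boot time.
 */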
3764
81819f0f
CL
3765void *__kmalloc(size_t size, gfp_t flags)
3766{
aadb4bc4 3767 struct kmem_cache *s;
5b882be4 3768 void *ret;
81819f0f 3769
95a05b42 3770 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
eada35ef 3771 return kmalloc_large(size, flags);
aadb4bc4 3772
2c59dd65 3773 s = kmalloc_slab(size, flags);
aadb4bc4
CL
3774
3775 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
3776 return s;
3777
2b847c3c 3778 ret = slab_alloc(s, flags, _RET_IP_);
5b882be4 3779
ca2b84cb 3780 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
5b882be4 3781
0116523c 3782 ret = kasan_kmalloc(s, ret, size, flags);
0316bec2 3783
5b882be4 3784 return ret;
81819f0f
CL
3785}
3786EXPORT_SYMBOL(__kmalloc);
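/*
 * Illustrative example, not part of the original source: a call such as
 * kmalloc(100, GFP_KERNEL) is below KMALLOC_MAX_CACHE_SIZE, so
 * kmalloc_slab() maps it to a fixed-size cache (typically kmalloc-128)
 * and slab_alloc() serves it; only larger requests fall through to the
 * page allocator via kmalloc_large().
 */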
3787
5d1f57e4 3788#ifdef CONFIG_NUMA
f619cfe1
CL
3789static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3790{
b1eeab67 3791 struct page *page;
e4f7c0b4 3792 void *ptr = NULL;
f619cfe1 3793
75f296d9 3794 flags |= __GFP_COMP;
4949148a 3795 page = alloc_pages_node(node, flags, get_order(size));
f619cfe1 3796 if (page)
e4f7c0b4
CM
3797 ptr = page_address(page);
3798
0116523c 3799 return kmalloc_large_node_hook(ptr, size, flags);
f619cfe1
CL
3800}
3801
81819f0f
CL
3802void *__kmalloc_node(size_t size, gfp_t flags, int node)
3803{
aadb4bc4 3804 struct kmem_cache *s;
5b882be4 3805 void *ret;
81819f0f 3806
95a05b42 3807 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
5b882be4
EGM
3808 ret = kmalloc_large_node(size, flags, node);
3809
ca2b84cb
EGM
3810 trace_kmalloc_node(_RET_IP_, ret,
3811 size, PAGE_SIZE << get_order(size),
3812 flags, node);
5b882be4
EGM
3813
3814 return ret;
3815 }
aadb4bc4 3816
2c59dd65 3817 s = kmalloc_slab(size, flags);
aadb4bc4
CL
3818
3819 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
3820 return s;
3821
2b847c3c 3822 ret = slab_alloc_node(s, flags, node, _RET_IP_);
5b882be4 3823
ca2b84cb 3824 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
5b882be4 3825
0116523c 3826 ret = kasan_kmalloc(s, ret, size, flags);
0316bec2 3827
5b882be4 3828 return ret;
81819f0f
CL
3829}
3830EXPORT_SYMBOL(__kmalloc_node);
3831#endif
3832
ed18adc1
KC
3833#ifdef CONFIG_HARDENED_USERCOPY
3834/*
afcc90f8
KC
3835 * Rejects incorrectly sized objects and objects that are to be copied
3836 * to/from userspace but do not fall entirely within the containing slab
3837 * cache's usercopy region.
ed18adc1
KC
3838 *
3839	 * Returns nothing if the check passes; otherwise the violation is
3840	 * reported through usercopy_warn() or usercopy_abort().
3841 */
f4e6e289
KC
3842void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
3843 bool to_user)
ed18adc1
KC
3844{
3845 struct kmem_cache *s;
44065b2e 3846 unsigned int offset;
ed18adc1
KC
3847 size_t object_size;
3848
3849 /* Find object and usable object size. */
3850 s = page->slab_cache;
ed18adc1
KC
3851
3852 /* Reject impossible pointers. */
3853 if (ptr < page_address(page))
f4e6e289
KC
3854 usercopy_abort("SLUB object not in SLUB page?!", NULL,
3855 to_user, 0, n);
ed18adc1
KC
3856
3857 /* Find offset within object. */
3858 offset = (ptr - page_address(page)) % s->size;
3859
3860 /* Adjust for redzone and reject if within the redzone. */
3861 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
3862 if (offset < s->red_left_pad)
f4e6e289
KC
3863 usercopy_abort("SLUB object in left red zone",
3864 s->name, to_user, offset, n);
ed18adc1
KC
3865 offset -= s->red_left_pad;
3866 }
3867
afcc90f8
KC
3868 /* Allow address range falling entirely within usercopy region. */
3869 if (offset >= s->useroffset &&
3870 offset - s->useroffset <= s->usersize &&
3871 n <= s->useroffset - offset + s->usersize)
f4e6e289 3872 return;
ed18adc1 3873
afcc90f8
KC
3874 /*
3875 * If the copy is still within the allocated object, produce
3876 * a warning instead of rejecting the copy. This is intended
3877 * to be a temporary method to find any missing usercopy
3878 * whitelists.
3879 */
3880 object_size = slab_ksize(s);
2d891fbc
KC
3881 if (usercopy_fallback &&
3882 offset <= object_size && n <= object_size - offset) {
afcc90f8
KC
3883 usercopy_warn("SLUB object", s->name, to_user, offset, n);
3884 return;
3885 }
ed18adc1 3886
f4e6e289 3887 usercopy_abort("SLUB object", s->name, to_user, offset, n);
ed18adc1
KC
3888}
3889#endif /* CONFIG_HARDENED_USERCOPY */
3890
0316bec2 3891static size_t __ksize(const void *object)
81819f0f 3892{
272c1d21 3893 struct page *page;
81819f0f 3894
ef8b4520 3895 if (unlikely(object == ZERO_SIZE_PTR))
272c1d21
CL
3896 return 0;
3897
294a80a8 3898 page = virt_to_head_page(object);
294a80a8 3899
76994412
PE
3900 if (unlikely(!PageSlab(page))) {
3901 WARN_ON(!PageCompound(page));
294a80a8 3902 return PAGE_SIZE << compound_order(page);
76994412 3903 }
81819f0f 3904
1b4f59e3 3905 return slab_ksize(page->slab_cache);
81819f0f 3906}
0316bec2
AR
3907
3908size_t ksize(const void *object)
3909{
3910 size_t size = __ksize(object);
3911	 /* We assume that ksize callers could use the whole allocated area,
4ebb31a4
AP
3912 * so we need to unpoison this area.
3913 */
3914 kasan_unpoison_shadow(object, size);
0316bec2
AR
3915 return size;
3916}
b1aabecd 3917EXPORT_SYMBOL(ksize);
81819f0f
CL
3918
3919void kfree(const void *x)
3920{
81819f0f 3921 struct page *page;
5bb983b0 3922 void *object = (void *)x;
81819f0f 3923
2121db74
PE
3924 trace_kfree(_RET_IP_, x);
3925
2408c550 3926 if (unlikely(ZERO_OR_NULL_PTR(x)))
81819f0f
CL
3927 return;
3928
b49af68f 3929 page = virt_to_head_page(x);
aadb4bc4 3930 if (unlikely(!PageSlab(page))) {
0937502a 3931 BUG_ON(!PageCompound(page));
47adccce 3932 kfree_hook(object);
4949148a 3933 __free_pages(page, compound_order(page));
aadb4bc4
CL
3934 return;
3935 }
81084651 3936 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
81819f0f
CL
3937}
3938EXPORT_SYMBOL(kfree);
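/*
 * Added commentary, not in the original source: kfree() handles both cases
 * transparently. Objects that live in a slab page are returned through
 * slab_free(), while allocations that were served directly by the page
 * allocator (compound pages from kmalloc_large*) are freed with
 * __free_pages().
 */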
3939
832f37f5
VD
3940#define SHRINK_PROMOTE_MAX 32
3941
2086d26a 3942/*
832f37f5
VD
3943 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
3944 * up most to the head of the partial lists. New allocations will then
3945 * fill those up and thus they can be removed from the partial lists.
672bba3a
CL
3946 *
3947 * The slabs with the least items are placed last. This results in them
3948	 * being allocated from last, which increases the chance that their
3949	 * remaining objects are freed and the slabs can eventually be discarded.
2086d26a 3950 */
c9fc5864 3951int __kmem_cache_shrink(struct kmem_cache *s)
2086d26a
CL
3952{
3953 int node;
3954 int i;
3955 struct kmem_cache_node *n;
3956 struct page *page;
3957 struct page *t;
832f37f5
VD
3958 struct list_head discard;
3959 struct list_head promote[SHRINK_PROMOTE_MAX];
2086d26a 3960 unsigned long flags;
ce3712d7 3961 int ret = 0;
2086d26a 3962
2086d26a 3963 flush_all(s);
fa45dc25 3964 for_each_kmem_cache_node(s, node, n) {
832f37f5
VD
3965 INIT_LIST_HEAD(&discard);
3966 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
3967 INIT_LIST_HEAD(promote + i);
2086d26a
CL
3968
3969 spin_lock_irqsave(&n->list_lock, flags);
3970
3971 /*
832f37f5 3972 * Build lists of slabs to discard or promote.
2086d26a 3973 *
672bba3a
CL
3974 * Note that concurrent frees may occur while we hold the
3975 * list_lock. page->inuse here is the upper limit.
2086d26a
CL
3976 */
3977 list_for_each_entry_safe(page, t, &n->partial, lru) {
832f37f5
VD
3978 int free = page->objects - page->inuse;
3979
3980 /* Do not reread page->inuse */
3981 barrier();
3982
3983 /* We do not keep full slabs on the list */
3984 BUG_ON(free <= 0);
3985
3986 if (free == page->objects) {
3987 list_move(&page->lru, &discard);
69cb8e6b 3988 n->nr_partial--;
832f37f5
VD
3989 } else if (free <= SHRINK_PROMOTE_MAX)
3990 list_move(&page->lru, promote + free - 1);
2086d26a
CL
3991 }
3992
2086d26a 3993 /*
832f37f5
VD
3994 * Promote the slabs filled up most to the head of the
3995 * partial list.
2086d26a 3996 */
832f37f5
VD
3997 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
3998 list_splice(promote + i, &n->partial);
2086d26a 3999
2086d26a 4000 spin_unlock_irqrestore(&n->list_lock, flags);
69cb8e6b
CL
4001
4002 /* Release empty slabs */
832f37f5 4003 list_for_each_entry_safe(page, t, &discard, lru)
69cb8e6b 4004 discard_slab(s, page);
ce3712d7
VD
4005
4006 if (slabs_node(s, node))
4007 ret = 1;
2086d26a
CL
4008 }
4009
ce3712d7 4010 return ret;
2086d26a 4011}
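/*
 * Added commentary, not in the original source: partial slabs with i
 * objects free (1 <= i <= SHRINK_PROMOTE_MAX) are collected in
 * promote[i - 1], and completely free slabs go to the discard list.
 * Splicing the buckets back in descending order of free objects leaves the
 * fullest slabs at the head of the partial list, so they are refilled
 * first.
 */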
2086d26a 4012
c9fc5864 4013#ifdef CONFIG_MEMCG
01fb58bc
TH
4014static void kmemcg_cache_deact_after_rcu(struct kmem_cache *s)
4015{
50862ce7
TH
4016 /*
4017 * Called with all the locks held after a sched RCU grace period.
4018 * Even if @s becomes empty after shrinking, we can't know that @s
4019 * doesn't have allocations already in-flight and thus can't
4020 * destroy @s until the associated memcg is released.
4021 *
4022 * However, let's remove the sysfs files for empty caches here.
4023 * Each cache has a lot of interface files which aren't
4024 * particularly useful for empty draining caches; otherwise, we can
4025 * easily end up with millions of unnecessary sysfs files on
4026 * systems which have a lot of memory and transient cgroups.
4027 */
4028 if (!__kmem_cache_shrink(s))
4029 sysfs_slab_remove(s);
01fb58bc
TH
4030}
4031
c9fc5864
TH
4032void __kmemcg_cache_deactivate(struct kmem_cache *s)
4033{
4034 /*
4035 * Disable empty slabs caching. Used to avoid pinning offline
4036 * memory cgroups by kmem pages that can be freed.
4037 */
e6d0e1dc 4038 slub_set_cpu_partial(s, 0);
c9fc5864
TH
4039 s->min_partial = 0;
4040
4041 /*
4042 * s->cpu_partial is checked locklessly (see put_cpu_partial), so
01fb58bc 4043 * we have to make sure the change is visible before shrinking.
c9fc5864 4044 */
01fb58bc 4045 slab_deactivate_memcg_cache_rcu_sched(s, kmemcg_cache_deact_after_rcu);
c9fc5864
TH
4046}
4047#endif
4048
b9049e23
YG
4049static int slab_mem_going_offline_callback(void *arg)
4050{
4051 struct kmem_cache *s;
4052
18004c5d 4053 mutex_lock(&slab_mutex);
b9049e23 4054 list_for_each_entry(s, &slab_caches, list)
c9fc5864 4055 __kmem_cache_shrink(s);
18004c5d 4056 mutex_unlock(&slab_mutex);
b9049e23
YG
4057
4058 return 0;
4059}
4060
4061static void slab_mem_offline_callback(void *arg)
4062{
4063 struct kmem_cache_node *n;
4064 struct kmem_cache *s;
4065 struct memory_notify *marg = arg;
4066 int offline_node;
4067
b9d5ab25 4068 offline_node = marg->status_change_nid_normal;
b9049e23
YG
4069
4070 /*
4071	 * If the node still has available memory, we still need the
4072	 * kmem_cache_node for it and there is nothing to do here.
4073 */
4074 if (offline_node < 0)
4075 return;
4076
18004c5d 4077 mutex_lock(&slab_mutex);
b9049e23
YG
4078 list_for_each_entry(s, &slab_caches, list) {
4079 n = get_node(s, offline_node);
4080 if (n) {
4081 /*
4082 * if n->nr_slabs > 0, slabs still exist on the node
4083 * that is going down. We were unable to free them,
c9404c9c 4084 * and offline_pages() function shouldn't call this
b9049e23
YG
4085 * callback. So, we must fail.
4086 */
0f389ec6 4087 BUG_ON(slabs_node(s, offline_node));
b9049e23
YG
4088
4089 s->node[offline_node] = NULL;
8de66a0c 4090 kmem_cache_free(kmem_cache_node, n);
b9049e23
YG
4091 }
4092 }
18004c5d 4093 mutex_unlock(&slab_mutex);
b9049e23
YG
4094}
4095
4096static int slab_mem_going_online_callback(void *arg)
4097{
4098 struct kmem_cache_node *n;
4099 struct kmem_cache *s;
4100 struct memory_notify *marg = arg;
b9d5ab25 4101 int nid = marg->status_change_nid_normal;
b9049e23
YG
4102 int ret = 0;
4103
4104 /*
4105 * If the node's memory is already available, then kmem_cache_node is
4106 * already created. Nothing to do.
4107 */
4108 if (nid < 0)
4109 return 0;
4110
4111 /*
0121c619 4112 * We are bringing a node online. No memory is available yet. We must
b9049e23
YG
4113 * allocate a kmem_cache_node structure in order to bring the node
4114 * online.
4115 */
18004c5d 4116 mutex_lock(&slab_mutex);
b9049e23
YG
4117 list_for_each_entry(s, &slab_caches, list) {
4118 /*
4119 * XXX: kmem_cache_alloc_node will fallback to other nodes
4120 * since memory is not yet available from the node that
4121 * is brought up.
4122 */
8de66a0c 4123 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
b9049e23
YG
4124 if (!n) {
4125 ret = -ENOMEM;
4126 goto out;
4127 }
4053497d 4128 init_kmem_cache_node(n);
b9049e23
YG
4129 s->node[nid] = n;
4130 }
4131out:
18004c5d 4132 mutex_unlock(&slab_mutex);
b9049e23
YG
4133 return ret;
4134}
4135
4136static int slab_memory_callback(struct notifier_block *self,
4137 unsigned long action, void *arg)
4138{
4139 int ret = 0;
4140
4141 switch (action) {
4142 case MEM_GOING_ONLINE:
4143 ret = slab_mem_going_online_callback(arg);
4144 break;
4145 case MEM_GOING_OFFLINE:
4146 ret = slab_mem_going_offline_callback(arg);
4147 break;
4148 case MEM_OFFLINE:
4149 case MEM_CANCEL_ONLINE:
4150 slab_mem_offline_callback(arg);
4151 break;
4152 case MEM_ONLINE:
4153 case MEM_CANCEL_OFFLINE:
4154 break;
4155 }
dc19f9db
KH
4156 if (ret)
4157 ret = notifier_from_errno(ret);
4158 else
4159 ret = NOTIFY_OK;
b9049e23
YG
4160 return ret;
4161}
4162
3ac38faa
AM
4163static struct notifier_block slab_memory_callback_nb = {
4164 .notifier_call = slab_memory_callback,
4165 .priority = SLAB_CALLBACK_PRI,
4166};
b9049e23 4167
81819f0f
CL
4168/********************************************************************
4169 * Basic setup of slabs
4170 *******************************************************************/
4171
51df1142
CL
4172/*
4173 * Used for early kmem_cache structures that were allocated using
dffb4d60
CL
4174 * the page allocator. Allocate them properly then fix up the pointers
4175 * that may be pointing to the wrong kmem_cache structure.
51df1142
CL
4176 */
4177
dffb4d60 4178static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
51df1142
CL
4179{
4180 int node;
dffb4d60 4181 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
fa45dc25 4182 struct kmem_cache_node *n;
51df1142 4183
dffb4d60 4184 memcpy(s, static_cache, kmem_cache->object_size);
51df1142 4185
7d557b3c
GC
4186 /*
4187 * This runs very early, and only the boot processor is supposed to be
4188 * up. Even if it weren't true, IRQs are not up so we couldn't fire
4189 * IPIs around.
4190 */
4191 __flush_cpu_slab(s, smp_processor_id());
fa45dc25 4192 for_each_kmem_cache_node(s, node, n) {
51df1142
CL
4193 struct page *p;
4194
fa45dc25
CL
4195 list_for_each_entry(p, &n->partial, lru)
4196 p->slab_cache = s;
51df1142 4197
607bf324 4198#ifdef CONFIG_SLUB_DEBUG
fa45dc25
CL
4199 list_for_each_entry(p, &n->full, lru)
4200 p->slab_cache = s;
51df1142 4201#endif
51df1142 4202 }
f7ce3190 4203 slab_init_memcg_params(s);
dffb4d60 4204 list_add(&s->list, &slab_caches);
510ded33 4205 memcg_link_cache(s);
dffb4d60 4206 return s;
51df1142
CL
4207}
4208
81819f0f
CL
4209void __init kmem_cache_init(void)
4210{
dffb4d60
CL
4211 static __initdata struct kmem_cache boot_kmem_cache,
4212 boot_kmem_cache_node;
51df1142 4213
fc8d8620
SG
4214 if (debug_guardpage_minorder())
4215 slub_max_order = 0;
4216
dffb4d60
CL
4217 kmem_cache_node = &boot_kmem_cache_node;
4218 kmem_cache = &boot_kmem_cache;
51df1142 4219
dffb4d60 4220 create_boot_cache(kmem_cache_node, "kmem_cache_node",
8eb8284b 4221 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
b9049e23 4222
3ac38faa 4223 register_hotmemory_notifier(&slab_memory_callback_nb);
81819f0f
CL
4224
4225 /* Able to allocate the per node structures */
4226 slab_state = PARTIAL;
4227
dffb4d60
CL
4228 create_boot_cache(kmem_cache, "kmem_cache",
4229 offsetof(struct kmem_cache, node) +
4230 nr_node_ids * sizeof(struct kmem_cache_node *),
8eb8284b 4231 SLAB_HWCACHE_ALIGN, 0, 0);
8a13a4cc 4232
dffb4d60 4233 kmem_cache = bootstrap(&boot_kmem_cache);
dffb4d60 4234 kmem_cache_node = bootstrap(&boot_kmem_cache_node);
51df1142
CL
4235
4236 /* Now we can use the kmem_cache to allocate kmalloc slabs */
34cc6990 4237 setup_kmalloc_cache_index_table();
f97d5f63 4238 create_kmalloc_caches(0);
81819f0f 4239
210e7a43
TG
4240 /* Setup random freelists for each cache */
4241 init_freelist_randomization();
4242
a96a87bf
SAS
4243 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4244 slub_cpu_dead);
81819f0f 4245
19af27af 4246 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n",
f97d5f63 4247 cache_line_size(),
81819f0f
CL
4248 slub_min_order, slub_max_order, slub_min_objects,
4249 nr_cpu_ids, nr_node_ids);
4250}
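/*
 * Added commentary, not in the original source: the bootstrap order above
 * matters. kmem_cache_node must exist before kmem_cache can allocate its
 * per node structures, so both start out as static __initdata copies and
 * are migrated into properly allocated objects by bootstrap() before the
 * kmalloc caches are created.
 */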
4251
7e85ee0c
PE
4252void __init kmem_cache_init_late(void)
4253{
7e85ee0c
PE
4254}
4255
2633d7a0 4256struct kmem_cache *
f4957d5b 4257__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
d50112ed 4258 slab_flags_t flags, void (*ctor)(void *))
81819f0f 4259{
426589f5 4260 struct kmem_cache *s, *c;
81819f0f 4261
a44cb944 4262 s = find_mergeable(size, align, flags, name, ctor);
81819f0f
CL
4263 if (s) {
4264 s->refcount++;
84d0ddd6 4265
81819f0f
CL
4266 /*
4267 * Adjust the object sizes so that we clear
4268 * the complete object on kzalloc.
4269 */
1b473f29 4270 s->object_size = max(s->object_size, size);
52ee6d74 4271 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
6446faa2 4272
426589f5 4273 for_each_memcg_cache(c, s) {
84d0ddd6 4274 c->object_size = s->object_size;
52ee6d74 4275 c->inuse = max(c->inuse, ALIGN(size, sizeof(void *)));
84d0ddd6
VD
4276 }
4277
7b8f3b66 4278 if (sysfs_slab_alias(s, name)) {
7b8f3b66 4279 s->refcount--;
cbb79694 4280 s = NULL;
7b8f3b66 4281 }
a0e1d1be 4282 }
6446faa2 4283
cbb79694
CL
4284 return s;
4285}
84c1cf62 4286
d50112ed 4287int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
cbb79694 4288{
aac3a166
PE
4289 int err;
4290
4291 err = kmem_cache_open(s, flags);
4292 if (err)
4293 return err;
20cea968 4294
45530c44
CL
4295 /* Mutex is not taken during early boot */
4296 if (slab_state <= UP)
4297 return 0;
4298
107dab5c 4299 memcg_propagate_slab_attrs(s);
aac3a166 4300 err = sysfs_slab_add(s);
aac3a166 4301 if (err)
52b4b950 4302 __kmem_cache_release(s);
20cea968 4303
aac3a166 4304 return err;
81819f0f 4305}
81819f0f 4306
ce71e27c 4307void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
81819f0f 4308{
aadb4bc4 4309 struct kmem_cache *s;
94b528d0 4310 void *ret;
aadb4bc4 4311
95a05b42 4312 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
eada35ef
PE
4313 return kmalloc_large(size, gfpflags);
4314
2c59dd65 4315 s = kmalloc_slab(size, gfpflags);
81819f0f 4316
2408c550 4317 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 4318 return s;
81819f0f 4319
2b847c3c 4320 ret = slab_alloc(s, gfpflags, caller);
94b528d0 4321
25985edc 4322 /* Honor the call site pointer we received. */
ca2b84cb 4323 trace_kmalloc(caller, ret, size, s->size, gfpflags);
94b528d0
EGM
4324
4325 return ret;
81819f0f
CL
4326}
4327
5d1f57e4 4328#ifdef CONFIG_NUMA
81819f0f 4329void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
ce71e27c 4330 int node, unsigned long caller)
81819f0f 4331{
aadb4bc4 4332 struct kmem_cache *s;
94b528d0 4333 void *ret;
aadb4bc4 4334
95a05b42 4335 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
d3e14aa3
XF
4336 ret = kmalloc_large_node(size, gfpflags, node);
4337
4338 trace_kmalloc_node(caller, ret,
4339 size, PAGE_SIZE << get_order(size),
4340 gfpflags, node);
4341
4342 return ret;
4343 }
eada35ef 4344
2c59dd65 4345 s = kmalloc_slab(size, gfpflags);
81819f0f 4346
2408c550 4347 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 4348 return s;
81819f0f 4349
2b847c3c 4350 ret = slab_alloc_node(s, gfpflags, node, caller);
94b528d0 4351
25985edc 4352 /* Honor the call site pointer we received. */
ca2b84cb 4353 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
94b528d0
EGM
4354
4355 return ret;
81819f0f 4356}
5d1f57e4 4357#endif
81819f0f 4358
ab4d5ed5 4359#ifdef CONFIG_SYSFS
205ab99d
CL
4360static int count_inuse(struct page *page)
4361{
4362 return page->inuse;
4363}
4364
4365static int count_total(struct page *page)
4366{
4367 return page->objects;
4368}
ab4d5ed5 4369#endif
205ab99d 4370
ab4d5ed5 4371#ifdef CONFIG_SLUB_DEBUG
434e245d
CL
4372static int validate_slab(struct kmem_cache *s, struct page *page,
4373 unsigned long *map)
53e15af0
CL
4374{
4375 void *p;
a973e9dd 4376 void *addr = page_address(page);
53e15af0
CL
4377
4378 if (!check_slab(s, page) ||
4379 !on_freelist(s, page, NULL))
4380 return 0;
4381
4382 /* Now we know that a valid freelist exists */
39b26464 4383 bitmap_zero(map, page->objects);
53e15af0 4384
5f80b13a
CL
4385 get_map(s, page, map);
4386 for_each_object(p, s, addr, page->objects) {
4387 if (test_bit(slab_index(p, s, addr), map))
4388 if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4389 return 0;
53e15af0
CL
4390 }
4391
224a88be 4392 for_each_object(p, s, addr, page->objects)
7656c72b 4393 if (!test_bit(slab_index(p, s, addr), map))
37d57443 4394 if (!check_object(s, page, p, SLUB_RED_ACTIVE))
53e15af0
CL
4395 return 0;
4396 return 1;
4397}
4398
434e245d
CL
4399static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4400 unsigned long *map)
53e15af0 4401{
881db7fb
CL
4402 slab_lock(page);
4403 validate_slab(s, page, map);
4404 slab_unlock(page);
53e15af0
CL
4405}
4406
434e245d
CL
4407static int validate_slab_node(struct kmem_cache *s,
4408 struct kmem_cache_node *n, unsigned long *map)
53e15af0
CL
4409{
4410 unsigned long count = 0;
4411 struct page *page;
4412 unsigned long flags;
4413
4414 spin_lock_irqsave(&n->list_lock, flags);
4415
4416 list_for_each_entry(page, &n->partial, lru) {
434e245d 4417 validate_slab_slab(s, page, map);
53e15af0
CL
4418 count++;
4419 }
4420 if (count != n->nr_partial)
f9f58285
FF
4421 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4422 s->name, count, n->nr_partial);
53e15af0
CL
4423
4424 if (!(s->flags & SLAB_STORE_USER))
4425 goto out;
4426
4427 list_for_each_entry(page, &n->full, lru) {
434e245d 4428 validate_slab_slab(s, page, map);
53e15af0
CL
4429 count++;
4430 }
4431 if (count != atomic_long_read(&n->nr_slabs))
f9f58285
FF
4432 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4433 s->name, count, atomic_long_read(&n->nr_slabs));
53e15af0
CL
4434
4435out:
4436 spin_unlock_irqrestore(&n->list_lock, flags);
4437 return count;
4438}
4439
434e245d 4440static long validate_slab_cache(struct kmem_cache *s)
53e15af0
CL
4441{
4442 int node;
4443 unsigned long count = 0;
fa45dc25 4444 struct kmem_cache_node *n;
0684e652 4445 unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
434e245d
CL
4446
4447 if (!map)
4448 return -ENOMEM;
53e15af0
CL
4449
4450 flush_all(s);
fa45dc25 4451 for_each_kmem_cache_node(s, node, n)
434e245d 4452 count += validate_slab_node(s, n, map);
0684e652 4453 bitmap_free(map);
53e15af0
CL
4454 return count;
4455}
88a420e4 4456/*
672bba3a 4457 * Generate lists of code addresses where slabcache objects are allocated
88a420e4
CL
4458 * and freed.
4459 */
4460
4461struct location {
4462 unsigned long count;
ce71e27c 4463 unsigned long addr;
45edfa58
CL
4464 long long sum_time;
4465 long min_time;
4466 long max_time;
4467 long min_pid;
4468 long max_pid;
174596a0 4469 DECLARE_BITMAP(cpus, NR_CPUS);
45edfa58 4470 nodemask_t nodes;
88a420e4
CL
4471};
4472
4473struct loc_track {
4474 unsigned long max;
4475 unsigned long count;
4476 struct location *loc;
4477};
4478
4479static void free_loc_track(struct loc_track *t)
4480{
4481 if (t->max)
4482 free_pages((unsigned long)t->loc,
4483 get_order(sizeof(struct location) * t->max));
4484}
4485
68dff6a9 4486static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
88a420e4
CL
4487{
4488 struct location *l;
4489 int order;
4490
88a420e4
CL
4491 order = get_order(sizeof(struct location) * max);
4492
68dff6a9 4493 l = (void *)__get_free_pages(flags, order);
88a420e4
CL
4494 if (!l)
4495 return 0;
4496
4497 if (t->count) {
4498 memcpy(l, t->loc, sizeof(struct location) * t->count);
4499 free_loc_track(t);
4500 }
4501 t->max = max;
4502 t->loc = l;
4503 return 1;
4504}
4505
4506static int add_location(struct loc_track *t, struct kmem_cache *s,
45edfa58 4507 const struct track *track)
88a420e4
CL
4508{
4509 long start, end, pos;
4510 struct location *l;
ce71e27c 4511 unsigned long caddr;
45edfa58 4512 unsigned long age = jiffies - track->when;
88a420e4
CL
4513
4514 start = -1;
4515 end = t->count;
4516
4517 for ( ; ; ) {
4518 pos = start + (end - start + 1) / 2;
4519
4520 /*
4521 * There is nothing at "end". If we end up there
4522	 * we need to insert the new entry before end.
4523 */
4524 if (pos == end)
4525 break;
4526
4527 caddr = t->loc[pos].addr;
45edfa58
CL
4528 if (track->addr == caddr) {
4529
4530 l = &t->loc[pos];
4531 l->count++;
4532 if (track->when) {
4533 l->sum_time += age;
4534 if (age < l->min_time)
4535 l->min_time = age;
4536 if (age > l->max_time)
4537 l->max_time = age;
4538
4539 if (track->pid < l->min_pid)
4540 l->min_pid = track->pid;
4541 if (track->pid > l->max_pid)
4542 l->max_pid = track->pid;
4543
174596a0
RR
4544 cpumask_set_cpu(track->cpu,
4545 to_cpumask(l->cpus));
45edfa58
CL
4546 }
4547 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
4548 return 1;
4549 }
4550
45edfa58 4551 if (track->addr < caddr)
88a420e4
CL
4552 end = pos;
4553 else
4554 start = pos;
4555 }
4556
4557 /*
672bba3a 4558 * Not found. Insert new tracking element.
88a420e4 4559 */
68dff6a9 4560 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
88a420e4
CL
4561 return 0;
4562
4563 l = t->loc + pos;
4564 if (pos < t->count)
4565 memmove(l + 1, l,
4566 (t->count - pos) * sizeof(struct location));
4567 t->count++;
4568 l->count = 1;
45edfa58
CL
4569 l->addr = track->addr;
4570 l->sum_time = age;
4571 l->min_time = age;
4572 l->max_time = age;
4573 l->min_pid = track->pid;
4574 l->max_pid = track->pid;
174596a0
RR
4575 cpumask_clear(to_cpumask(l->cpus));
4576 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
45edfa58
CL
4577 nodes_clear(l->nodes);
4578 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
4579 return 1;
4580}
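/*
 * Added commentary, not in the original source: t->loc is kept sorted by
 * track->addr. The loop above is a binary search; a matching call site
 * just has its counters and cpu/node masks updated, while a new call site
 * is inserted in place with memmove(), growing the array via
 * alloc_loc_track() when it fills up.
 */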
4581
4582static void process_slab(struct loc_track *t, struct kmem_cache *s,
bbd7d57b 4583 struct page *page, enum track_item alloc,
a5dd5c11 4584 unsigned long *map)
88a420e4 4585{
a973e9dd 4586 void *addr = page_address(page);
88a420e4
CL
4587 void *p;
4588
39b26464 4589 bitmap_zero(map, page->objects);
5f80b13a 4590 get_map(s, page, map);
88a420e4 4591
224a88be 4592 for_each_object(p, s, addr, page->objects)
45edfa58
CL
4593 if (!test_bit(slab_index(p, s, addr), map))
4594 add_location(t, s, get_track(s, p, alloc));
88a420e4
CL
4595}
4596
4597static int list_locations(struct kmem_cache *s, char *buf,
4598 enum track_item alloc)
4599{
e374d483 4600 int len = 0;
88a420e4 4601 unsigned long i;
68dff6a9 4602 struct loc_track t = { 0, 0, NULL };
88a420e4 4603 int node;
fa45dc25 4604 struct kmem_cache_node *n;
0684e652 4605 unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
88a420e4 4606
bbd7d57b 4607 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
0ee931c4 4608 GFP_KERNEL)) {
0684e652 4609 bitmap_free(map);
68dff6a9 4610 return sprintf(buf, "Out of memory\n");
bbd7d57b 4611 }
88a420e4
CL
4612 /* Push back cpu slabs */
4613 flush_all(s);
4614
fa45dc25 4615 for_each_kmem_cache_node(s, node, n) {
88a420e4
CL
4616 unsigned long flags;
4617 struct page *page;
4618
9e86943b 4619 if (!atomic_long_read(&n->nr_slabs))
88a420e4
CL
4620 continue;
4621
4622 spin_lock_irqsave(&n->list_lock, flags);
4623 list_for_each_entry(page, &n->partial, lru)
bbd7d57b 4624 process_slab(&t, s, page, alloc, map);
88a420e4 4625 list_for_each_entry(page, &n->full, lru)
bbd7d57b 4626 process_slab(&t, s, page, alloc, map);
88a420e4
CL
4627 spin_unlock_irqrestore(&n->list_lock, flags);
4628 }
4629
4630 for (i = 0; i < t.count; i++) {
45edfa58 4631 struct location *l = &t.loc[i];
88a420e4 4632
9c246247 4633 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
88a420e4 4634 break;
e374d483 4635 len += sprintf(buf + len, "%7ld ", l->count);
45edfa58
CL
4636
4637 if (l->addr)
62c70bce 4638 len += sprintf(buf + len, "%pS", (void *)l->addr);
88a420e4 4639 else
e374d483 4640 len += sprintf(buf + len, "<not-available>");
45edfa58
CL
4641
4642 if (l->sum_time != l->min_time) {
e374d483 4643 len += sprintf(buf + len, " age=%ld/%ld/%ld",
f8bd2258
RZ
4644 l->min_time,
4645 (long)div_u64(l->sum_time, l->count),
4646 l->max_time);
45edfa58 4647 } else
e374d483 4648 len += sprintf(buf + len, " age=%ld",
45edfa58
CL
4649 l->min_time);
4650
4651 if (l->min_pid != l->max_pid)
e374d483 4652 len += sprintf(buf + len, " pid=%ld-%ld",
45edfa58
CL
4653 l->min_pid, l->max_pid);
4654 else
e374d483 4655 len += sprintf(buf + len, " pid=%ld",
45edfa58
CL
4656 l->min_pid);
4657
174596a0
RR
4658 if (num_online_cpus() > 1 &&
4659 !cpumask_empty(to_cpumask(l->cpus)) &&
5024c1d7
TH
4660 len < PAGE_SIZE - 60)
4661 len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4662 " cpus=%*pbl",
4663 cpumask_pr_args(to_cpumask(l->cpus)));
45edfa58 4664
62bc62a8 4665 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
5024c1d7
TH
4666 len < PAGE_SIZE - 60)
4667 len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4668 " nodes=%*pbl",
4669 nodemask_pr_args(&l->nodes));
45edfa58 4670
e374d483 4671 len += sprintf(buf + len, "\n");
88a420e4
CL
4672 }
4673
4674 free_loc_track(&t);
0684e652 4675 bitmap_free(map);
88a420e4 4676 if (!t.count)
e374d483
HH
4677 len += sprintf(buf, "No data\n");
4678 return len;
88a420e4 4679}
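/*
 * Added commentary, not in the original source: each emitted line has the
 * form (hypothetical symbol and numbers, for illustration only)
 *
 *   127 kmem_cache_alloc+0x9a/0x1c0 age=31/456/1024 pid=1-320 cpus=0-3 nodes=0
 *
 * i.e. hit count, call site, age min/avg/max, pid range, and the cpu and
 * node masks recorded for that call site.
 */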
ab4d5ed5 4680#endif
88a420e4 4681
a5a84755 4682#ifdef SLUB_RESILIENCY_TEST
c07b8183 4683static void __init resiliency_test(void)
a5a84755
CL
4684{
4685 u8 *p;
cc252eae 4686 int type = KMALLOC_NORMAL;
a5a84755 4687
95a05b42 4688 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
a5a84755 4689
f9f58285
FF
4690 pr_err("SLUB resiliency testing\n");
4691 pr_err("-----------------------\n");
4692 pr_err("A. Corruption after allocation\n");
a5a84755
CL
4693
4694 p = kzalloc(16, GFP_KERNEL);
4695 p[16] = 0x12;
f9f58285
FF
4696 pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
4697 p + 16);
a5a84755 4698
cc252eae 4699 validate_slab_cache(kmalloc_caches[type][4]);
a5a84755
CL
4700
4701 /* Hmmm... The next two are dangerous */
4702 p = kzalloc(32, GFP_KERNEL);
4703 p[32 + sizeof(void *)] = 0x34;
f9f58285
FF
4704 pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
4705 p);
4706 pr_err("If allocated object is overwritten then not detectable\n\n");
a5a84755 4707
cc252eae 4708 validate_slab_cache(kmalloc_caches[type][5]);
a5a84755
CL
4709 p = kzalloc(64, GFP_KERNEL);
4710 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4711 *p = 0x56;
f9f58285
FF
4712 pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4713 p);
4714 pr_err("If allocated object is overwritten then not detectable\n\n");
cc252eae 4715 validate_slab_cache(kmalloc_caches[type][6]);
a5a84755 4716
f9f58285 4717 pr_err("\nB. Corruption after free\n");
a5a84755
CL
4718 p = kzalloc(128, GFP_KERNEL);
4719 kfree(p);
4720 *p = 0x78;
f9f58285 4721 pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
cc252eae 4722 validate_slab_cache(kmalloc_caches[type][7]);
a5a84755
CL
4723
4724 p = kzalloc(256, GFP_KERNEL);
4725 kfree(p);
4726 p[50] = 0x9a;
f9f58285 4727 pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
cc252eae 4728 validate_slab_cache(kmalloc_caches[type][8]);
a5a84755
CL
4729
4730 p = kzalloc(512, GFP_KERNEL);
4731 kfree(p);
4732 p[512] = 0xab;
f9f58285 4733 pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
cc252eae 4734 validate_slab_cache(kmalloc_caches[type][9]);
a5a84755
CL
4735}
4736#else
4737#ifdef CONFIG_SYSFS
4738static void resiliency_test(void) {};
4739#endif
4740#endif
4741
ab4d5ed5 4742#ifdef CONFIG_SYSFS
81819f0f 4743enum slab_stat_type {
205ab99d
CL
4744 SL_ALL, /* All slabs */
4745 SL_PARTIAL, /* Only partially allocated slabs */
4746 SL_CPU, /* Only slabs used for cpu caches */
4747 SL_OBJECTS, /* Determine allocated objects not slabs */
4748 SL_TOTAL /* Determine object capacity not slabs */
81819f0f
CL
4749};
4750
205ab99d 4751#define SO_ALL (1 << SL_ALL)
81819f0f
CL
4752#define SO_PARTIAL (1 << SL_PARTIAL)
4753#define SO_CPU (1 << SL_CPU)
4754#define SO_OBJECTS (1 << SL_OBJECTS)
205ab99d 4755#define SO_TOTAL (1 << SL_TOTAL)
81819f0f 4756
1663f26d
TH
4757#ifdef CONFIG_MEMCG
4758static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
4759
4760static int __init setup_slub_memcg_sysfs(char *str)
4761{
4762 int v;
4763
4764 if (get_option(&str, &v) > 0)
4765 memcg_sysfs_enabled = v;
4766
4767 return 1;
4768}
4769
4770__setup("slub_memcg_sysfs=", setup_slub_memcg_sysfs);
4771#endif
4772
62e5c4b4
CG
4773static ssize_t show_slab_objects(struct kmem_cache *s,
4774 char *buf, unsigned long flags)
81819f0f
CL
4775{
4776 unsigned long total = 0;
81819f0f
CL
4777 int node;
4778 int x;
4779 unsigned long *nodes;
81819f0f 4780
6396bb22 4781 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
62e5c4b4
CG
4782 if (!nodes)
4783 return -ENOMEM;
81819f0f 4784
205ab99d
CL
4785 if (flags & SO_CPU) {
4786 int cpu;
81819f0f 4787
205ab99d 4788 for_each_possible_cpu(cpu) {
d0e0ac97
CG
4789 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4790 cpu);
ec3ab083 4791 int node;
49e22585 4792 struct page *page;
dfb4f096 4793
4db0c3c2 4794 page = READ_ONCE(c->page);
ec3ab083
CL
4795 if (!page)
4796 continue;
205ab99d 4797
ec3ab083
CL
4798 node = page_to_nid(page);
4799 if (flags & SO_TOTAL)
4800 x = page->objects;
4801 else if (flags & SO_OBJECTS)
4802 x = page->inuse;
4803 else
4804 x = 1;
49e22585 4805
ec3ab083
CL
4806 total += x;
4807 nodes[node] += x;
4808
a93cf07b 4809 page = slub_percpu_partial_read_once(c);
49e22585 4810 if (page) {
8afb1474
LZ
4811 node = page_to_nid(page);
4812 if (flags & SO_TOTAL)
4813 WARN_ON_ONCE(1);
4814 else if (flags & SO_OBJECTS)
4815 WARN_ON_ONCE(1);
4816 else
4817 x = page->pages;
bc6697d8
ED
4818 total += x;
4819 nodes[node] += x;
49e22585 4820 }
81819f0f
CL
4821 }
4822 }
4823
bfc8c901 4824 get_online_mems();
ab4d5ed5 4825#ifdef CONFIG_SLUB_DEBUG
205ab99d 4826 if (flags & SO_ALL) {
fa45dc25
CL
4827 struct kmem_cache_node *n;
4828
4829 for_each_kmem_cache_node(s, node, n) {
205ab99d 4830
d0e0ac97
CG
4831 if (flags & SO_TOTAL)
4832 x = atomic_long_read(&n->total_objects);
4833 else if (flags & SO_OBJECTS)
4834 x = atomic_long_read(&n->total_objects) -
4835 count_partial(n, count_free);
81819f0f 4836 else
205ab99d 4837 x = atomic_long_read(&n->nr_slabs);
81819f0f
CL
4838 total += x;
4839 nodes[node] += x;
4840 }
4841
ab4d5ed5
CL
4842 } else
4843#endif
4844 if (flags & SO_PARTIAL) {
fa45dc25 4845 struct kmem_cache_node *n;
81819f0f 4846
fa45dc25 4847 for_each_kmem_cache_node(s, node, n) {
205ab99d
CL
4848 if (flags & SO_TOTAL)
4849 x = count_partial(n, count_total);
4850 else if (flags & SO_OBJECTS)
4851 x = count_partial(n, count_inuse);
81819f0f 4852 else
205ab99d 4853 x = n->nr_partial;
81819f0f
CL
4854 total += x;
4855 nodes[node] += x;
4856 }
4857 }
81819f0f
CL
4858 x = sprintf(buf, "%lu", total);
4859#ifdef CONFIG_NUMA
fa45dc25 4860 for (node = 0; node < nr_node_ids; node++)
81819f0f
CL
4861 if (nodes[node])
4862 x += sprintf(buf + x, " N%d=%lu",
4863 node, nodes[node]);
4864#endif
bfc8c901 4865 put_online_mems();
81819f0f
CL
4866 kfree(nodes);
4867 return x + sprintf(buf + x, "\n");
4868}
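/*
 * Added commentary, not in the original source: the buffer produced above
 * is the total followed by a per node breakdown on NUMA kernels, e.g.
 * (made-up numbers)
 *
 *   4096 N0=3072 N1=1024
 */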
4869
ab4d5ed5 4870#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
4871static int any_slab_objects(struct kmem_cache *s)
4872{
4873 int node;
fa45dc25 4874 struct kmem_cache_node *n;
81819f0f 4875
fa45dc25 4876 for_each_kmem_cache_node(s, node, n)
4ea33e2d 4877 if (atomic_long_read(&n->total_objects))
81819f0f 4878 return 1;
fa45dc25 4879
81819f0f
CL
4880 return 0;
4881}
ab4d5ed5 4882#endif
81819f0f
CL
4883
4884#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
497888cf 4885#define to_slab(n) container_of(n, struct kmem_cache, kobj)
81819f0f
CL
4886
4887struct slab_attribute {
4888 struct attribute attr;
4889 ssize_t (*show)(struct kmem_cache *s, char *buf);
4890 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4891};
4892
4893#define SLAB_ATTR_RO(_name) \
ab067e99
VK
4894 static struct slab_attribute _name##_attr = \
4895 __ATTR(_name, 0400, _name##_show, NULL)
81819f0f
CL
4896
4897#define SLAB_ATTR(_name) \
4898 static struct slab_attribute _name##_attr = \
ab067e99 4899 __ATTR(_name, 0600, _name##_show, _name##_store)
81819f0f 4900
81819f0f
CL
4901static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4902{
44065b2e 4903 return sprintf(buf, "%u\n", s->size);
81819f0f
CL
4904}
4905SLAB_ATTR_RO(slab_size);
4906
4907static ssize_t align_show(struct kmem_cache *s, char *buf)
4908{
3a3791ec 4909 return sprintf(buf, "%u\n", s->align);
81819f0f
CL
4910}
4911SLAB_ATTR_RO(align);
4912
4913static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4914{
1b473f29 4915 return sprintf(buf, "%u\n", s->object_size);
81819f0f
CL
4916}
4917SLAB_ATTR_RO(object_size);
4918
4919static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4920{
19af27af 4921 return sprintf(buf, "%u\n", oo_objects(s->oo));
81819f0f
CL
4922}
4923SLAB_ATTR_RO(objs_per_slab);
4924
06b285dc
CL
4925static ssize_t order_store(struct kmem_cache *s,
4926 const char *buf, size_t length)
4927{
19af27af 4928 unsigned int order;
0121c619
CL
4929 int err;
4930
19af27af 4931 err = kstrtouint(buf, 10, &order);
0121c619
CL
4932 if (err)
4933 return err;
06b285dc
CL
4934
4935 if (order > slub_max_order || order < slub_min_order)
4936 return -EINVAL;
4937
4938 calculate_sizes(s, order);
4939 return length;
4940}
4941
81819f0f
CL
4942static ssize_t order_show(struct kmem_cache *s, char *buf)
4943{
19af27af 4944 return sprintf(buf, "%u\n", oo_order(s->oo));
81819f0f 4945}
06b285dc 4946SLAB_ATTR(order);
81819f0f 4947
73d342b1
DR
4948static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4949{
4950 return sprintf(buf, "%lu\n", s->min_partial);
4951}
4952
4953static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4954 size_t length)
4955{
4956 unsigned long min;
4957 int err;
4958
3dbb95f7 4959 err = kstrtoul(buf, 10, &min);
73d342b1
DR
4960 if (err)
4961 return err;
4962
c0bdb232 4963 set_min_partial(s, min);
73d342b1
DR
4964 return length;
4965}
4966SLAB_ATTR(min_partial);
4967
49e22585
CL
4968static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4969{
e6d0e1dc 4970 return sprintf(buf, "%u\n", slub_cpu_partial(s));
49e22585
CL
4971}
4972
4973static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4974 size_t length)
4975{
e5d9998f 4976 unsigned int objects;
49e22585
CL
4977 int err;
4978
e5d9998f 4979 err = kstrtouint(buf, 10, &objects);
49e22585
CL
4980 if (err)
4981 return err;
345c905d 4982 if (objects && !kmem_cache_has_cpu_partial(s))
74ee4ef1 4983 return -EINVAL;
49e22585 4984
e6d0e1dc 4985 slub_set_cpu_partial(s, objects);
49e22585
CL
4986 flush_all(s);
4987 return length;
4988}
4989SLAB_ATTR(cpu_partial);
4990
81819f0f
CL
4991static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4992{
62c70bce
JP
4993 if (!s->ctor)
4994 return 0;
4995 return sprintf(buf, "%pS\n", s->ctor);
81819f0f
CL
4996}
4997SLAB_ATTR_RO(ctor);
4998
81819f0f
CL
4999static ssize_t aliases_show(struct kmem_cache *s, char *buf)
5000{
4307c14f 5001 return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
81819f0f
CL
5002}
5003SLAB_ATTR_RO(aliases);
5004
81819f0f
CL
5005static ssize_t partial_show(struct kmem_cache *s, char *buf)
5006{
d9acf4b7 5007 return show_slab_objects(s, buf, SO_PARTIAL);
81819f0f
CL
5008}
5009SLAB_ATTR_RO(partial);
5010
5011static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
5012{
d9acf4b7 5013 return show_slab_objects(s, buf, SO_CPU);
81819f0f
CL
5014}
5015SLAB_ATTR_RO(cpu_slabs);
5016
5017static ssize_t objects_show(struct kmem_cache *s, char *buf)
5018{
205ab99d 5019 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
81819f0f
CL
5020}
5021SLAB_ATTR_RO(objects);
5022
205ab99d
CL
5023static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
5024{
5025 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
5026}
5027SLAB_ATTR_RO(objects_partial);
5028
49e22585
CL
5029static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
5030{
5031 int objects = 0;
5032 int pages = 0;
5033 int cpu;
5034 int len;
5035
5036 for_each_online_cpu(cpu) {
a93cf07b
WY
5037 struct page *page;
5038
5039 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
49e22585
CL
5040
5041 if (page) {
5042 pages += page->pages;
5043 objects += page->pobjects;
5044 }
5045 }
5046
5047 len = sprintf(buf, "%d(%d)", objects, pages);
5048
5049#ifdef CONFIG_SMP
5050 for_each_online_cpu(cpu) {
a93cf07b
WY
5051 struct page *page;
5052
5053 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
49e22585
CL
5054
5055 if (page && len < PAGE_SIZE - 20)
5056 len += sprintf(buf + len, " C%d=%d(%d)", cpu,
5057 page->pobjects, page->pages);
5058 }
5059#endif
5060 return len + sprintf(buf + len, "\n");
5061}
5062SLAB_ATTR_RO(slabs_cpu_partial);
5063
a5a84755
CL
5064static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
5065{
5066 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
5067}
5068
5069static ssize_t reclaim_account_store(struct kmem_cache *s,
5070 const char *buf, size_t length)
5071{
5072 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
5073 if (buf[0] == '1')
5074 s->flags |= SLAB_RECLAIM_ACCOUNT;
5075 return length;
5076}
5077SLAB_ATTR(reclaim_account);
5078
5079static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
5080{
5081 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
5082}
5083SLAB_ATTR_RO(hwcache_align);
5084
5085#ifdef CONFIG_ZONE_DMA
5086static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
5087{
5088 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
5089}
5090SLAB_ATTR_RO(cache_dma);
5091#endif
5092
8eb8284b
DW
5093static ssize_t usersize_show(struct kmem_cache *s, char *buf)
5094{
7bbdb81e 5095 return sprintf(buf, "%u\n", s->usersize);
8eb8284b
DW
5096}
5097SLAB_ATTR_RO(usersize);
5098
a5a84755
CL
5099static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
5100{
5f0d5a3a 5101 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
a5a84755
CL
5102}
5103SLAB_ATTR_RO(destroy_by_rcu);
5104
ab4d5ed5 5105#ifdef CONFIG_SLUB_DEBUG
a5a84755
CL
5106static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5107{
5108 return show_slab_objects(s, buf, SO_ALL);
5109}
5110SLAB_ATTR_RO(slabs);
5111
205ab99d
CL
5112static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5113{
5114 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5115}
5116SLAB_ATTR_RO(total_objects);
5117
81819f0f
CL
5118static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5119{
becfda68 5120 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
81819f0f
CL
5121}
5122
5123static ssize_t sanity_checks_store(struct kmem_cache *s,
5124 const char *buf, size_t length)
5125{
becfda68 5126 s->flags &= ~SLAB_CONSISTENCY_CHECKS;
b789ef51
CL
5127 if (buf[0] == '1') {
5128 s->flags &= ~__CMPXCHG_DOUBLE;
becfda68 5129 s->flags |= SLAB_CONSISTENCY_CHECKS;
b789ef51 5130 }
81819f0f
CL
5131 return length;
5132}
5133SLAB_ATTR(sanity_checks);
5134
5135static ssize_t trace_show(struct kmem_cache *s, char *buf)
5136{
5137 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
5138}
5139
5140static ssize_t trace_store(struct kmem_cache *s, const char *buf,
5141 size_t length)
5142{
c9e16131
CL
5143 /*
5144 * Tracing a merged cache is going to give confusing results
5145 * as well as cause other issues like converting a mergeable
5146	 * cache into an unmergeable one.
5147 */
5148 if (s->refcount > 1)
5149 return -EINVAL;
5150
81819f0f 5151 s->flags &= ~SLAB_TRACE;
b789ef51
CL
5152 if (buf[0] == '1') {
5153 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 5154 s->flags |= SLAB_TRACE;
b789ef51 5155 }
81819f0f
CL
5156 return length;
5157}
5158SLAB_ATTR(trace);

static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
}

static ssize_t red_zone_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_RED_ZONE;
	if (buf[0] == '1') {
		s->flags |= SLAB_RED_ZONE;
	}
	calculate_sizes(s, -1);
	return length;
}
SLAB_ATTR(red_zone);

static ssize_t poison_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
}

static ssize_t poison_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_POISON;
	if (buf[0] == '1') {
		s->flags |= SLAB_POISON;
	}
	calculate_sizes(s, -1);
	return length;
}
SLAB_ATTR(poison);

static ssize_t store_user_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
}

static ssize_t store_user_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_STORE_USER;
	if (buf[0] == '1') {
		s->flags &= ~__CMPXCHG_DOUBLE;
		s->flags |= SLAB_STORE_USER;
	}
	calculate_sizes(s, -1);
	return length;
}
SLAB_ATTR(store_user);

static ssize_t validate_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t validate_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	int ret = -EINVAL;

	if (buf[0] == '1') {
		ret = validate_slab_cache(s);
		if (ret >= 0)
			ret = length;
	}
	return ret;
}
SLAB_ATTR(validate);
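/*
 * Illustrative usage (a sketch; the cache name is hypothetical): the
 * attribute reads back empty and only acts on a write of '1', which runs
 * validate_slab_cache() and reports any inconsistencies found to the
 * kernel log:
 *
 *   # cat /sys/kernel/slab/kmalloc-64/validate        (prints nothing)
 *   # echo 1 > /sys/kernel/slab/kmalloc-64/validate
 *   # dmesg | tail
 */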

static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_ALLOC);
}
SLAB_ATTR_RO(alloc_calls);

static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_FREE);
}
SLAB_ATTR_RO(free_calls);
#endif /* CONFIG_SLUB_DEBUG */

#ifdef CONFIG_FAILSLAB
static ssize_t failslab_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
}

static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
							size_t length)
{
	if (s->refcount > 1)
		return -EINVAL;

	s->flags &= ~SLAB_FAILSLAB;
	if (buf[0] == '1')
		s->flags |= SLAB_FAILSLAB;
	return length;
}
SLAB_ATTR(failslab);
#endif

static ssize_t shrink_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t shrink_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	if (buf[0] == '1')
		kmem_cache_shrink(s);
	else
		return -EINVAL;
	return length;
}
SLAB_ATTR(shrink);
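/*
 * Illustrative usage (a sketch; the cache name is hypothetical): writing
 * '1' releases empty slabs and compacts the partial lists via
 * kmem_cache_shrink(); any other value is rejected with -EINVAL:
 *
 *   # echo 1 > /sys/kernel/slab/dentry/shrink
 */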

#ifdef CONFIG_NUMA
static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%u\n", s->remote_node_defrag_ratio / 10);
}

static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	unsigned int ratio;
	int err;

	err = kstrtouint(buf, 10, &ratio);
	if (err)
		return err;
	if (ratio > 100)
		return -ERANGE;

	s->remote_node_defrag_ratio = ratio * 10;

	return length;
}
SLAB_ATTR(remote_node_defrag_ratio);
#endif
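/*
 * The ratio is exposed as a percentage but stored scaled by 10, so a
 * write of "20" is kept internally as 200 and reads back as "20".
 * Illustrative usage (sketch, hypothetical cache name):
 *
 *   # echo 20 > /sys/kernel/slab/kmalloc-128/remote_node_defrag_ratio
 *   # cat /sys/kernel/slab/kmalloc-128/remote_node_defrag_ratio
 *   20
 */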

#ifdef CONFIG_SLUB_STATS
static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
{
	unsigned long sum = 0;
	int cpu;
	int len;
	int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	for_each_online_cpu(cpu) {
		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];

		data[cpu] = x;
		sum += x;
	}

	len = sprintf(buf, "%lu", sum);

#ifdef CONFIG_SMP
	for_each_online_cpu(cpu) {
		if (data[cpu] && len < PAGE_SIZE - 20)
			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
	}
#endif
	kfree(data);
	return len + sprintf(buf + len, "\n");
}
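/*
 * The produced line is the total followed by the non-zero per-CPU
 * contributions, e.g. a read of the alloc_fastpath statistic might
 * return (values illustrative only):
 *
 *   1502818 C0=750021 C1=752797
 */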

static void clear_stat(struct kmem_cache *s, enum stat_item si)
{
	int cpu;

	for_each_online_cpu(cpu)
		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
}

#define STAT_ATTR(si, text) 					\
static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
{								\
	return show_stat(s, buf, si);				\
}								\
static ssize_t text##_store(struct kmem_cache *s,		\
				const char *buf, size_t length)	\
{								\
	if (buf[0] != '0')					\
		return -EINVAL;					\
	clear_stat(s, si);					\
	return length;						\
}								\
SLAB_ATTR(text);						\

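/*
 * For reference, STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath) below expands
 * to an alloc_fastpath_show() wrapper around show_stat() and an
 * alloc_fastpath_store() that accepts only '0' and clears the counter on
 * every online CPU, plus the SLAB_ATTR(alloc_fastpath) definition.
 */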
STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
STAT_ATTR(FREE_FASTPATH, free_fastpath);
STAT_ATTR(FREE_SLOWPATH, free_slowpath);
STAT_ATTR(FREE_FROZEN, free_frozen);
STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
STAT_ATTR(ALLOC_SLAB, alloc_slab);
STAT_ATTR(ALLOC_REFILL, alloc_refill);
STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
STAT_ATTR(FREE_SLAB, free_slab);
STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
STAT_ATTR(ORDER_FALLBACK, order_fallback);
STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
#endif

static struct attribute *slab_attrs[] = {
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr,
	&min_partial_attr.attr,
	&cpu_partial_attr.attr,
	&objects_attr.attr,
	&objects_partial_attr.attr,
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
	&shrink_attr.attr,
	&slabs_cpu_partial_attr.attr,
#ifdef CONFIG_SLUB_DEBUG
	&total_objects_attr.attr,
	&slabs_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr,
	&validate_attr.attr,
	&alloc_calls_attr.attr,
	&free_calls_attr.attr,
#endif
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
	&remote_node_defrag_ratio_attr.attr,
#endif
#ifdef CONFIG_SLUB_STATS
	&alloc_fastpath_attr.attr,
	&alloc_slowpath_attr.attr,
	&free_fastpath_attr.attr,
	&free_slowpath_attr.attr,
	&free_frozen_attr.attr,
	&free_add_partial_attr.attr,
	&free_remove_partial_attr.attr,
	&alloc_from_partial_attr.attr,
	&alloc_slab_attr.attr,
	&alloc_refill_attr.attr,
	&alloc_node_mismatch_attr.attr,
	&free_slab_attr.attr,
	&cpuslab_flush_attr.attr,
	&deactivate_full_attr.attr,
	&deactivate_empty_attr.attr,
	&deactivate_to_head_attr.attr,
	&deactivate_to_tail_attr.attr,
	&deactivate_remote_frees_attr.attr,
	&deactivate_bypass_attr.attr,
	&order_fallback_attr.attr,
	&cmpxchg_double_fail_attr.attr,
	&cmpxchg_double_cpu_fail_attr.attr,
	&cpu_partial_alloc_attr.attr,
	&cpu_partial_free_attr.attr,
	&cpu_partial_node_attr.attr,
	&cpu_partial_drain_attr.attr,
#endif
#ifdef CONFIG_FAILSLAB
	&failslab_attr.attr,
#endif
	&usersize_attr.attr,

	NULL
};
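/*
 * Every attribute listed above becomes a file in the cache's sysfs
 * directory once sysfs_slab_add() has run, e.g. (illustrative listing;
 * the actual entries depend on the config options compiled in):
 *
 *   /sys/kernel/slab/kmalloc-64/object_size
 *   /sys/kernel/slab/kmalloc-64/order
 *   /sys/kernel/slab/kmalloc-64/shrink
 */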

static const struct attribute_group slab_attr_group = {
	.attrs = slab_attrs,
};

static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	err = attribute->show(s, buf);

	return err;
}

static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	err = attribute->store(s, buf, len);
#ifdef CONFIG_MEMCG
	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
		struct kmem_cache *c;

		mutex_lock(&slab_mutex);
		if (s->max_attr_size < len)
			s->max_attr_size = len;

		/*
		 * This is a best-effort propagation, so this function's
		 * return value is determined by the parent cache only. The
		 * reason is that not all attributes have well defined
		 * semantics for rollback - most of the actions have
		 * permanent effects.
		 *
		 * Returning the error value of any child that fails is not
		 * well defined either, in the sense that a user seeing such
		 * an error code could not tell anything about the state of
		 * the cache.
		 *
		 * Only returning the error code for the parent cache has
		 * well defined semantics: the cache written to directly
		 * either failed or succeeded, and if it succeeded we loop
		 * through the descendants with best-effort propagation.
		 */
		for_each_memcg_cache(c, s)
			attribute->store(c, buf, len);
		mutex_unlock(&slab_mutex);
	}
#endif
	return err;
}
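/*
 * Concretely (illustrative): with memcg kmem accounting active, writing
 * "1" to /sys/kernel/slab/<cache>/trace on a root cache first updates the
 * root cache and, if that store succeeded, repeats the same store on
 * every per-memcg child cache; failures in the children are ignored.
 */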

static void memcg_propagate_slab_attrs(struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG
	int i;
	char *buffer = NULL;
	struct kmem_cache *root_cache;

	if (is_root_cache(s))
		return;

	root_cache = s->memcg_params.root_cache;

	/*
	 * This means the root cache had no attribute written to it, so
	 * there is no point in copying default values around.
	 */
	if (!root_cache->max_attr_size)
		return;

	for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
		char mbuf[64];
		char *buf;
		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
		ssize_t len;

		if (!attr || !attr->store || !attr->show)
			continue;

		/*
		 * It is really bad that we have to allocate here, so we will
		 * do it only as a fallback. If we actually allocate, though,
		 * we can just use the allocated buffer until the end.
		 *
		 * Most of the slub attributes will tend to be very small in
		 * size, but sysfs allows buffers up to a page, so larger
		 * values can theoretically happen.
		 */
		if (buffer)
			buf = buffer;
		else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
			buf = mbuf;
		else {
			buffer = (char *) get_zeroed_page(GFP_KERNEL);
			if (WARN_ON(!buffer))
				continue;
			buf = buffer;
		}

		len = attr->show(root_cache, buf);
		if (len > 0)
			attr->store(s, buf, len);
	}

	if (buffer)
		free_page((unsigned long)buffer);
#endif
}

static void kmem_cache_release(struct kobject *k)
{
	slab_kmem_cache_release(to_slab(k));
}

static const struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
};

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
	.release = kmem_cache_release,
};

static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &slab_ktype)
		return 1;
	return 0;
}

static const struct kset_uevent_ops slab_uevent_ops = {
	.filter = uevent_filter,
};

static struct kset *slab_kset;

static inline struct kset *cache_kset(struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG
	if (!is_root_cache(s))
		return s->memcg_params.root_cache->memcg_kset;
#endif
	return slab_kset;
}

#define ID_STR_LENGTH 64

/*
 * Create a unique string id for a slab cache:
 *
 * Format	:[flags-]size
 */
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

	BUG_ON(!name);

	*p++ = ':';
	/*
	 * First flags affecting slabcache operations. We will only
	 * get here for aliasable slabs so we do not need to support
	 * too many flags. The flags here must cover all flags that
	 * are matched during merging to guarantee that the id is
	 * unique.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
	if (s->flags & SLAB_CONSISTENCY_CHECKS)
		*p++ = 'F';
	if (s->flags & SLAB_ACCOUNT)
		*p++ = 'A';
	if (p != name + 1)
		*p++ = '-';
	p += sprintf(p, "%07u", s->size);

	BUG_ON(p > name + ID_STR_LENGTH - 1);
	return name;
}
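/*
 * Example (illustrative): a mergeable 192-byte cache created with
 * SLAB_CACHE_DMA and SLAB_ACCOUNT would get the id ":dA-0000192", while
 * one with none of the matched flags would simply be ":0000192".
 */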

static void sysfs_slab_remove_workfn(struct work_struct *work)
{
	struct kmem_cache *s =
		container_of(work, struct kmem_cache, kobj_remove_work);

	if (!s->kobj.state_in_sysfs)
		/*
		 * For a memcg cache, this may be called during
		 * deactivation and again on shutdown. Remove only once.
		 * A cache is never shut down before deactivation is
		 * complete, so no need to worry about synchronization.
		 */
		goto out;

#ifdef CONFIG_MEMCG
	kset_unregister(s->memcg_kset);
#endif
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
out:
	kobject_put(&s->kobj);
}

static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	struct kset *kset = cache_kset(s);
	int unmergeable = slab_unmergeable(s);

	INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);

	if (!kset) {
		kobject_init(&s->kobj, &slab_ktype);
		return 0;
	}

	if (!unmergeable && disable_higher_order_debug &&
			(slub_debug & DEBUG_METADATA_FLAGS))
		unmergeable = 1;

	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
	if (err)
		goto out;

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err)
		goto out_del_kobj;

#ifdef CONFIG_MEMCG
	if (is_root_cache(s) && memcg_sysfs_enabled) {
		s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
		if (!s->memcg_kset) {
			err = -ENOMEM;
			goto out_del_kobj;
		}
	}
#endif

	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
	}
out:
	if (!unmergeable)
		kfree(name);
	return err;
out_del_kobj:
	kobject_del(&s->kobj);
	goto out;
}

static void sysfs_slab_remove(struct kmem_cache *s)
{
	if (slab_state < FULL)
		/*
		 * Sysfs has not been setup yet so no need to remove the
		 * cache from sysfs.
		 */
		return;

	kobject_get(&s->kobj);
	schedule_work(&s->kobj_remove_work);
}

void sysfs_slab_unlink(struct kmem_cache *s)
{
	if (slab_state >= FULL)
		kobject_del(&s->kobj);
}

void sysfs_slab_release(struct kmem_cache *s)
{
	if (slab_state >= FULL)
		kobject_put(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == FULL) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}

static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	mutex_lock(&slab_mutex);

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		mutex_unlock(&slab_mutex);
		pr_err("Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = FULL;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
			       s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
			       al->name);
		kfree(al);
	}

	mutex_unlock(&slab_mutex);
	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif /* CONFIG_SYSFS */

/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLUB_DEBUG
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
{
	unsigned long nr_slabs = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(s, node, n) {
		nr_slabs += node_nr_slabs(n);
		nr_objs += node_nr_objs(n);
		nr_free += count_partial(n, count_free);
	}

	sinfo->active_objs = nr_objs - nr_free;
	sinfo->num_objs = nr_objs;
	sinfo->active_slabs = nr_slabs;
	sinfo->num_slabs = nr_slabs;
	sinfo->objects_per_slab = oo_objects(s->oo);
	sinfo->cache_order = oo_order(s->oo);
}
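/*
 * Note that SLUB does not track per-slab free counts directly, so
 * active_objs above is derived as (total objects) - (free objects on the
 * partial lists), and active_slabs is reported equal to num_slabs.
 */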

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
{
}

ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	return -EIO;
}
#endif /* CONFIG_SLUB_DEBUG */