// SPDX-License-Identifier: GPL-2.0
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, though typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked.
 * These objects are detected in kfree() because PageSlab()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with the
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, __alloc_pages_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be done so on pages residing on the same node,
 * in order to prevent random node placement.
 */

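/*
 * Worked example (illustrative, assuming 4K pages and an 8-byte kmalloc
 * header/alignment): kmalloc(200) becomes a 208-byte request searched for
 * on free_slob_small (< 256 bytes), kmalloc(500) lands on free_slob_medium
 * (< 1024 bytes), and kmalloc(2000) on free_slob_large.  kmalloc(8192)
 * skips SLOB entirely and is served by alloc_pages() as a compound page,
 * which kfree() later recognises because PageSlab() is false for it.
 */
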
#include <linux/kernel.h>
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <linux/atomic.h>

#include "slab.h"
/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

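/*
 * Encoding example (illustrative): a free block of 8 units whose next
 * free block starts 100 units into the same page is stored as
 * s[0].units == 8, s[1].units == 100; a 1-unit free block with the same
 * successor is stored as the single value s[0].units == -100.
 */
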
/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct page *sp)
{
	return PageSlobFree(sp);
}

static void set_slob_page_free(struct page *sp, struct list_head *list)
{
	list_add(&sp->slab_list, list);
	__SetPageSlobFree(sp);
}

static inline void clear_slob_page_free(struct page *sp)
{
	list_del(&sp->slab_list);
	__ClearPageSlobFree(sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)

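/*
 * Sizing example (illustrative, assuming 4K pages): slobidx_t is then an
 * s16, so SLOB_UNIT == sizeof(slob_t) == 2 bytes, SLOB_UNITS(100) == 50,
 * and a freshly added page provides SLOB_UNITS(PAGE_SIZE) == 2048 units.
 */
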
/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

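/*
 * Layout example (illustrative): for a SLAB_TYPESAFE_BY_RCU cache whose
 * c->size starts at 64 bytes, __kmem_cache_create() grows c->size to
 * 64 + sizeof(struct slob_rcu); kmem_cache_free() then writes the footer
 * at b + 64 and defers the real free to kmem_rcu_free() via call_rcu().
 */
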
/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base + next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

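/*
 * Traversal example (illustrative): a freshly initialised page in
 * slob_alloc() runs set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE)),
 * so slob_next() of that single free block points exactly one page past
 * the base; its offset within the page is zero, which is what slob_last()
 * tests for.
 */
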
static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE)
		page = __alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * slob_page_alloc() - Allocate a slob block within a given slob_page sp.
 * @sp: Page to look in.
 * @size: Size of the allocation.
 * @align: Allocation alignment.
 * @page_removed_from_list: Return parameter.
 *
 * Tries to find a chunk of memory at least @size bytes big within @page.
 *
 * Return: Pointer to memory if allocated, %NULL otherwise.  If the
 *         allocation fills up @page then the page is removed from the
 *         freelist, in this case @page_removed_from_list will be set to
 *         true (set to false otherwise).
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align,
			     bool *page_removed_from_list)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	*page_removed_from_list = false;
	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units) {
				clear_slob_page_free(sp);
				*page_removed_from_list = true;
			}
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}

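/*
 * Worked example (illustrative): a 20-byte request (10 units) with
 * align == 8 against a 100-unit free block that starts 2 bytes into the
 * page first splits off a 3-unit head (delta == 3) so the allocation
 * begins on an 8-byte boundary, then carves 10 units out of the aligned
 * remainder and links the trailing 87 free units back into the page's
 * free list.
 */
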
/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;
	bool _unused;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, slab_list) {
		bool page_removed_from_list = false;
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		b = slob_page_alloc(sp, size, align, &page_removed_from_list);
		if (!b)
			continue;

		/*
		 * If slob_page_alloc() removed sp from the list then we
		 * cannot call list functions on sp.  If so allocation
		 * did not fragment the page anyway so optimisation is
		 * unnecessary.
		 */
		if (!page_removed_from_list) {
			/*
			 * Improve fragment distribution and reduce our average
			 * search time by starting our next search here. (see
			 * Knuth vol 1, sec 2.5, pg 449)
			 */
			if (!list_is_first(&sp->slab_list, slob_list))
				list_rotate_to_front(&sp->slab_list, slob_list);
		}
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->slab_list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align, &_unused);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely(gfp & __GFP_ZERO))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		page_mapcount_reset(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
	unsigned int *m;
	int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;

	fs_reclaim_acquire(gfp);
	fs_reclaim_release(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(caller, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);

		trace_kmalloc_node(caller, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}

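/*
 * Resulting kmalloc block layout (illustrative, assuming align == 8):
 *
 *	m              m + 4       m + 8 == ret
 *	+--------------+-----------+--------------------- - -
 *	| size (uint)  |  padding  | caller's object ...
 *	+--------------+-----------+--------------------- - -
 *
 * kfree() and __ksize() recover the header by reading the unsigned int
 * at (block - align).
 */
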
void *__kmalloc(size_t size, gfp_t gfp)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
				  int node, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, node, caller);
}
#endif

void kfree(const void *block)
{
	struct page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		__free_pages(sp, compound_order(sp));
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t __ksize(const void *block)
{
	struct page *sp;
	int align;
	unsigned int *m;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = virt_to_page(block);
	if (unlikely(!PageSlab(sp)))
		return PAGE_SIZE << compound_order(sp);

	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	m = (unsigned int *)(block - align);
	return SLOB_UNITS(*m) * SLOB_UNIT;
}
EXPORT_SYMBOL(__ksize);

int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
{
	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* leave room for rcu footer at the end of object */
		c->size += sizeof(struct slob_rcu);
	}
	c->flags = flags;
	return 0;
}

static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (b && c->ctor) {
		WARN_ON_ONCE(flags & __GFP_ZERO);
		c->ctor(b);
	}

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}

void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
{
	return slob_alloc_node(cachep, gfp, node);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
	__kmem_cache_free_bulk(s, size, p);
}
EXPORT_SYMBOL(kmem_cache_free_bulk);

int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
			  void **p)
{
	return __kmem_cache_alloc_bulk(s, flags, size, p);
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);

int __kmem_cache_shutdown(struct kmem_cache *c)
{
	/* No way to check for remaining objects */
	return 0;
}

void __kmem_cache_release(struct kmem_cache *c)
{
}

int __kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}

struct kmem_cache kmem_cache_boot = {
	.name = "kmem_cache",
	.size = sizeof(struct kmem_cache),
	.flags = SLAB_PANIC,
	.align = ARCH_KMALLOC_MINALIGN,
};

void __init kmem_cache_init(void)
{
	kmem_cache = &kmem_cache_boot;
	slab_state = UP;
}

void __init kmem_cache_init_late(void)
{
	slab_state = FULL;
}