slob: remove bigblock tracking
[linux-2.6-block.git] / mm / slob.c
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is 4 bytes on 32-bit and 8 bytes on 64-bit, though it
 * could be as low as 2 if the compiler alignment requirements allow.
 *
 * The slob heap is a linked list of pages from __get_free_page, and
 * within each page, there is a singly-linked list of free blocks (slob_t).
 * The heap is grown on demand and allocation from the heap is currently
 * first-fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are 4-byte aligned and prepended with a 4-byte header.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * __get_free_pages directly, allocating compound pages so the page order
 * does not have to be separately tracked, and also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because slob_page()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with the
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling __get_free_pages. As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <asm/atomic.h>

/* SLOB_MIN_ALIGN == sizeof(long) */
#if BITS_PER_LONG == 32
#define SLOB_MIN_ALIGN 4
#else
#define SLOB_MIN_ALIGN 8
#endif

/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * SLOB_MIN_ALIGN)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

/*
 * Align struct slob_block to long for now, but can some embedded
 * architectures get away with less?
 */
struct slob_block {
	slobidx_t units;
} __attribute__((aligned(SLOB_MIN_ALIGN)));
typedef struct slob_block slob_t;

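/*
 * A worked example of the encoding above (illustrative values, assuming a
 * 32-bit build where offsets are counted in slob_t units from the start of
 * the page): a free block of 3 units whose next free block sits 20 units
 * into the page keeps both facts in its first two units,
 *
 *	s[0].units = 3;		positive value: size of this free block
 *	s[1].units = 20;	offset of the next free block
 *
 * while a 1-unit free block only has room for the negated offset,
 *
 *	s[0].units = -20;	negative value: offset of the next block
 */
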
/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mandatory */
			atomic_t _count;	/* mandatory */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;
	};
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
	reset_page_mapcount(&sp->page);
	sp->page.mapping = NULL;
}

/*
 * All (partially) free slob pages go on this list.
 */
static LIST_HEAD(free_slob_pages);

/*
 * slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int slob_page(struct slob_page *sp)
{
	return test_bit(PG_active, &sp->flags);
}

static inline void set_slob_page(struct slob_page *sp)
{
	__set_bit(PG_active, &sp->flags);
}

static inline void clear_slob_page(struct slob_page *sp)
{
	__clear_bit(PG_active, &sp->flags);
}

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct slob_page *sp)
{
	return test_bit(PG_private, &sp->flags);
}

static inline void set_slob_page_free(struct slob_page *sp)
{
	list_add(&sp->list, &free_slob_pages);
	__set_bit(PG_private, &sp->flags);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
	list_del(&sp->list);
	__clear_bit(PG_private, &sp->flags);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES

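/*
 * Quick arithmetic check (illustrative, assuming a 32-bit build where
 * SLOB_UNIT is 4 bytes): SLOB_UNITS(100) rounds up to 25 units, and
 * SLOB_UNITS(1) still costs a whole unit, so one slob_t is the smallest
 * possible allocation.
 */
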
/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

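/*
 * Layout sketch (illustrative, not taken from the original comments): for a
 * SLAB_DESTROY_BY_RCU cache, kmem_cache_create() below grows c->size by
 * sizeof(struct slob_rcu), so every object b handed out is laid out as
 *
 *	[ caller-visible object | struct slob_rcu footer ]
 *	b                       b + c->size - sizeof(struct slob_rcu)
 *
 * kmem_cache_free() fills in the footer and passes it to call_rcu();
 * kmem_rcu_free() later recovers b from the footer and does the real free.
 */
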
/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = 0;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
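
/*
 * Worked example of the alignment path above (illustrative numbers,
 * assuming 4-byte units): if cur sits 3 units into the page and the caller
 * asked for 16-byte alignment, aligned lands on unit 4 and delta is 1, so a
 * 1-unit head fragment is carved off and left on the free list while the
 * allocation proceeds from the aligned position.
 */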

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
	struct slob_page *sp;
	slob_t *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, &free_slob_pages, list) {
		if (sp->units >= SLOB_UNITS(size)) {
			b = slob_page_alloc(sp, size, align);
			if (b)
				break;
		}
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = (slob_t *)__get_free_page(gfp);
		if (!b)
			return 0;
		sp = (struct slob_page *)virt_to_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct slob_page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;

	if (!block)
		return;
	BUG_ON(!size);

	sp = (struct slob_page *)virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		clear_slob_page(sp);
		free_slob_page(sp);
		free_page((unsigned long)b);
		goto out;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->free = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		set_slob_page_free(sp);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < sp->free) {
		set_slob(b, units, sp->free);
		sp->free = b;
	} else {
		prev = sp->free;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
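
/*
 * Coalescing in slob_free() above, in a nutshell (illustrative summary): a
 * returned block is first merged with the free block that follows it when
 * they are adjacent (b + units == next), then with the free block that
 * precedes it (prev + slob_units(prev) == b), so runs of freed neighbours
 * collapse back into a single large free block.
 */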

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

void *__kmalloc(size_t size, gfp_t gfp)
{
	if (size < PAGE_SIZE - SLOB_UNIT) {
		slob_t *m;
		m = slob_alloc(size + SLOB_UNIT, gfp, 0);
		if (!m)
			return NULL;
		m->units = size;
		return m+1;
	} else {
		void *ret;

		ret = (void *) __get_free_pages(gfp | __GFP_COMP,
						get_order(size));
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}
		return ret;
	}
}
EXPORT_SYMBOL(__kmalloc);
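
/*
 * Minimal usage sketch of the small/large split in __kmalloc() above. This
 * helper is illustrative only (its name and the sizes are made up); the
 * kmalloc/ksize/kfree calls are the real APIs declared in <linux/slab.h>.
 */
static inline void kmalloc_paths_sketch(void)
{
	void *small = kmalloc(100, GFP_KERNEL);		/* slob_alloc() + 1-unit header */
	void *large = kmalloc(2 * PAGE_SIZE, GFP_KERNEL); /* __get_free_pages() path */

	/* ksize() recovers the size from the header or from page->private */
	if (small)
		pr_debug("small ksize=%zu\n", ksize(small));
	if (large)
		pr_debug("large ksize=%zu\n", ksize(large));

	kfree(small);	/* kfree(NULL) is a no-op, so no checks needed */
	kfree(large);
}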

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 *
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!p))
		return kmalloc_track_caller(new_size, flags);

	if (unlikely(!new_size)) {
		kfree(p);
		return NULL;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret) {
		memcpy(ret, p, min(new_size, ksize(p)));
		kfree(p);
	}
	return ret;
}
EXPORT_SYMBOL(krealloc);

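/*
 * Minimal usage sketch for krealloc() above; the helper name and policy are
 * hypothetical, only the krealloc/kfree calls are the real APIs.
 */
static inline void *grow_buffer_sketch(void *buf, size_t new_size)
{
	void *bigger;

	if (!new_size)		/* krealloc(buf, 0, ...) would free buf */
		return buf;

	bigger = krealloc(buf, new_size, GFP_KERNEL);
	if (!bigger) {
		kfree(buf);	/* on failure the old buffer is left untouched */
		return NULL;
	}
	return bigger;		/* may or may not be the same pointer as buf */
}
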
void kfree(const void *block)
{
	struct slob_page *sp;

	if (!block)
		return;

	sp = (struct slob_page *)virt_to_page(block);
	if (slob_page(sp)) {
		slob_t *m = (slob_t *)block - 1;
		slob_free(m, m->units + SLOB_UNIT);
	} else
		put_page(&sp->page);
}

EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct slob_page *sp;

	if (!block)
		return 0;

	sp = (struct slob_page *)virt_to_page(block);
	if (slob_page(sp))
		return ((slob_t *)block - 1)->units + SLOB_UNIT;
	else
		return sp->page.private;
}

struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *, struct kmem_cache *, unsigned long);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags,
	void (*ctor)(void*, struct kmem_cache *, unsigned long),
	void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache), flags, 0);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
{
	void *b;

	if (c->size < PAGE_SIZE)
		b = slob_alloc(c->size, flags, c->align);
	else
		b = (void *)__get_free_pages(flags, get_order(c->size));

	if (c->ctor)
		c->ctor(b, c, 0);

	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc);

void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
{
	void *ret = kmem_cache_alloc(c, flags);
	if (ret)
		memset(ret, 0, c->size);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_zalloc);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		free_pages((unsigned long)b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		INIT_RCU_HEAD(&slob_rcu->head);
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}
}
EXPORT_SYMBOL(kmem_cache_free);

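/*
 * Minimal usage sketch of the cache API implemented above (illustrative;
 * the cache name, object type and helper are hypothetical):
 */
struct sketch_obj {
	int value;
};

static inline void kmem_cache_usage_sketch(void)
{
	struct kmem_cache *cache;
	struct sketch_obj *obj;

	cache = kmem_cache_create("sketch_obj", sizeof(struct sketch_obj),
				  0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!cache)
		return;

	obj = kmem_cache_alloc(cache, GFP_KERNEL);	/* slob_alloc() path */
	if (obj) {
		obj->value = 1;
		kmem_cache_free(cache, obj);
	}
	kmem_cache_destroy(cache);
}
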
unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
	return 0;
}

void __init kmem_cache_init(void)
{
}