smalloc: use SMALLOC_BPI instead of SMALLOC_BPB in add_pool()
[fio.git] / smalloc.c
/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
#include <sys/mman.h>
#include <assert.h>
#include <string.h>

#include "fio.h"
#include "fio_sem.h"
#include "os/os.h"
#include "smalloc.h"
#include "log.h"

#define SMALLOC_REDZONE		/* define to detect memory corruption */

#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)
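
/*
 * Geometry example: with SMALLOC_BPB = 32 and 32-bit ints, SMALLOC_BPI
 * is 32, so SMALLOC_BPL = 32 * 32 = 1024. Each bitmap bit covers one
 * 32-byte block and each bitmap word covers 1024 bytes of pool memory,
 * so a 16MB pool needs 16384 bitmap words.
 */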

#define INITIAL_SIZE	16*1024*1024	/* new pool size */
#define INITIAL_POOLS	8		/* maximum number of pools to setup */

#define MAX_POOLS	16

#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U

unsigned int smalloc_pool_size = INITIAL_SIZE;
#ifdef SMALLOC_REDZONE
static const int int_mask = sizeof(int) - 1;
#endif

struct pool {
	struct fio_sem *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	size_t free_blocks;			/* free blocks */
	size_t nr_blocks;			/* total blocks */
	size_t next_non_full;
	size_t mmap_size;
};

struct block_hdr {
	size_t size;
#ifdef SMALLOC_REDZONE
	unsigned int prered;
#endif
};
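
/*
 * Allocation layout, as derived from the code below: every allocation
 * is prefixed by a struct block_hdr recording the rounded-up size (and
 * the pre redzone, when enabled), with the post redzone word at the
 * aligned end of the allocation:
 *
 *	[ block_hdr | user data ... | postred ]
 *	            ^-- pointer returned by smalloc()
 */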

/*
 * This suppresses the voluminous potential bitmap printout when
 * smalloc encounters an OOM error
 */
static const bool enable_smalloc_debug = false;

static struct pool *mp;
static unsigned int nr_pools;
static unsigned int last_pool;

static inline int ptr_valid(struct pool *pool, void *ptr)
{
	unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

	return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}

static inline size_t size_to_blocks(size_t size)
{
	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
}
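
/*
 * Example: size_to_blocks(100) = (100 + 31) / 32 = 4, i.e. a 100-byte
 * allocation (including header overhead) consumes four 32-byte blocks.
 */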

static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))
{
	while (nr_blocks) {
		unsigned int this_blocks, mask;
		unsigned int *map;

		if (pool_idx >= pool->nr_blocks)
			return 0;

		map = &pool->bitmap[pool_idx];

		this_blocks = nr_blocks;
		if (this_blocks + idx > SMALLOC_BPI) {
			this_blocks = SMALLOC_BPI - idx;
			idx = SMALLOC_BPI - this_blocks;
		}

		if (this_blocks == SMALLOC_BPI)
			mask = -1U;
		else
			mask = ((1U << this_blocks) - 1) << idx;

		if (!func(map, mask))
			return 0;

		nr_blocks -= this_blocks;
		idx = 0;
		pool_idx++;
	}

	return 1;
}
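
/*
 * Mask example for the loop above: applying func() to 3 blocks starting
 * at bit 5 of a word builds mask = ((1U << 3) - 1) << 5 = 0xe0. A span
 * that crosses a word boundary is clipped to the current word and the
 * remainder is handled on the next iteration, starting at bit 0.
 */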

static int mask_cmp(unsigned int *map, unsigned int mask)
{
	return !(*map & mask);
}

static int mask_clear(unsigned int *map, unsigned int mask)
{
	assert((*map & mask) == mask);
	*map &= ~mask;
	return 1;
}

static int mask_set(unsigned int *map, unsigned int mask)
{
	assert(!(*map & mask));
	*map |= mask;
	return 1;
}

static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}

static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}

static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}

static int find_next_zero(int word, int start)
{
	assert(word != -1U);
	word >>= start;
	return ffz(word) + start;
}
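
/*
 * Example: find_next_zero(0xf, 0) returns 4, the index of the lowest
 * clear bit at or above the start index (ffz() is fio's find-first-zero
 * helper from the arch headers). Callers must guarantee a zero bit
 * exists, hence the assert.
 */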

static bool add_pool(struct pool *pool, unsigned int alloc_size)
{
	int bitmap_blocks;
	int mmap_flags;
	void *ptr;

	if (nr_pools == MAX_POOLS)
		return false;

#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
#endif
	alloc_size += sizeof(struct block_hdr);
	if (alloc_size < INITIAL_SIZE)
		alloc_size = INITIAL_SIZE;

	/* round up to nearest full number of blocks */
	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
	bitmap_blocks = alloc_size / SMALLOC_BPL;
	alloc_size += bitmap_blocks * sizeof(unsigned int);
	pool->mmap_size = alloc_size;

	pool->nr_blocks = bitmap_blocks;
	pool->free_blocks = bitmap_blocks * SMALLOC_BPI;

	mmap_flags = OS_MAP_ANON;
#ifdef CONFIG_ESX
	mmap_flags |= MAP_PRIVATE;
#else
	mmap_flags |= MAP_SHARED;
#endif
	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, mmap_flags, -1, 0);

	if (ptr == MAP_FAILED)
		goto out_fail;

	pool->map = ptr;
	pool->bitmap = (unsigned int *)((char *) ptr + (pool->nr_blocks * SMALLOC_BPL));
	memset(pool->bitmap, 0, bitmap_blocks * sizeof(unsigned int));

	pool->lock = fio_sem_init(FIO_SEM_UNLOCKED);
	if (!pool->lock)
		goto out_fail;

	nr_pools++;
	return true;
out_fail:
	log_err("smalloc: failed adding pool\n");
	if (pool->map)
		munmap(pool->map, pool->mmap_size);
	return false;
}
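
/*
 * Sizing example for the default pool: INITIAL_SIZE (16MB) is already a
 * multiple of SMALLOC_BPL, so bitmap_blocks = 16M / 1024 = 16384 and
 * the bitmap adds 16384 * 4 = 64KB to mmap_size. free_blocks starts at
 * 16384 * SMALLOC_BPI = 524288 blocks; as this commit's subject notes,
 * each bitmap word tracks SMALLOC_BPI blocks, not SMALLOC_BPB.
 */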

void sinit(void)
{
	bool ret;
	int i;

	/*
	 * sinit() can be called more than once if alloc-size is
	 * set. But we want to allocate space for the struct pool
	 * instances only once.
	 */
	if (!mp) {
		mp = (struct pool *) mmap(NULL,
			MAX_POOLS * sizeof(struct pool),
			PROT_READ | PROT_WRITE,
			OS_MAP_ANON | MAP_SHARED, -1, 0);

		assert(mp != MAP_FAILED);
	}

	for (i = 0; i < INITIAL_POOLS; i++) {
		ret = add_pool(&mp[nr_pools], smalloc_pool_size);
		if (!ret)
			break;
	}

	/*
	 * If we added at least one pool, we should be OK for most
	 * cases.
	 */
	assert(i);
}

static void cleanup_pool(struct pool *pool)
{
	/*
	 * This will also remove the temporary file we used as a backing
	 * store, it was already unlinked
	 */
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_sem_remove(pool->lock);
}

void scleanup(void)
{
	unsigned int i;

	for (i = 0; i < nr_pools; i++)
		cleanup_pool(&mp[i]);

	munmap(mp, MAX_POOLS * sizeof(struct pool));
}

#ifdef SMALLOC_REDZONE
static void *postred_ptr(struct block_hdr *hdr)
{
	uintptr_t ptr;

	ptr = (uintptr_t) hdr + hdr->size - sizeof(unsigned int);
	ptr = (uintptr_t) PTR_ALIGN(ptr, int_mask);

	return (void *) ptr;
}

static void fill_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	if (hdr->prered != SMALLOC_PRE_RED) {
		log_err("smalloc pre redzone destroyed!\n"
			" ptr=%p, prered=%x, expected %x\n",
			hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred != SMALLOC_POST_RED) {
		log_err("smalloc post redzone destroyed!\n"
			" ptr=%p, postred=%x, expected %x\n",
			hdr, *postred, SMALLOC_POST_RED);
		assert(0);
	}
}
#else
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif
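
/*
 * Redzone placement note: blocks start 32-byte aligned and
 * size_to_alloc_size() rounds allocations to int alignment, so the
 * post redzone computed by postred_ptr() lands in the last int-sized
 * slot of the allocation.
 */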

static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	fio_sem_down(pool->lock);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	if (i < pool->next_non_full)
		pool->next_non_full = i;
	pool->free_blocks += size_to_blocks(hdr->size);
	fio_sem_up(pool->lock);
}
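
/*
 * Index arithmetic example: a block header at byte offset 4128 into the
 * pool map gives bitmap word i = 4128 / 1024 = 4 and bit index
 * idx = (4128 % 1024) / 32 = 1, with the default SMALLOC_BPL of 1024
 * and SMALLOC_BPB of 32.
 */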

void sfree(void *ptr)
{
	struct pool *pool = NULL;
	unsigned int i;

	if (!ptr)
		return;

	for (i = 0; i < nr_pools; i++) {
		if (ptr_valid(&mp[i], ptr)) {
			pool = &mp[i];
			break;
		}
	}

	if (pool) {
		sfree_pool(pool, ptr);
		return;
	}

	log_err("smalloc: ptr %p not from smalloc pool\n", ptr);
}

static unsigned int find_best_index(struct pool *pool)
{
	unsigned int i;

	assert(pool->free_blocks);

	for (i = pool->next_non_full; pool->bitmap[i] == -1U; i++) {
		if (i == pool->nr_blocks - 1) {
			unsigned int j;

			for (j = 0; j < pool->nr_blocks; j++)
				if (pool->bitmap[j] != -1U)
					return j;
		}
	}

	return i;
}
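
/*
 * find_best_index() scans forward from the pool's next_non_full hint
 * and, if it reaches the last bitmap word with every word still full,
 * falls back to a scan from the start. The assert on free_blocks
 * guarantees some word has a clear bit, so one of the two loops finds
 * it.
 */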

static void *__smalloc_pool(struct pool *pool, size_t size)
{
	size_t nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	fio_sem_down(pool->lock);

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)
		goto fail;

	pool->next_non_full = find_best_index(pool);

	last_idx = 0;
	offset = -1U;
	i = pool->next_non_full;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		if (pool->bitmap[i] == -1U) {
			i++;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	fio_sem_up(pool->lock);
	return ret;
}
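
/*
 * __smalloc_pool() is a first-fit search: starting from the first
 * non-full bitmap word, it probes the next zero bit, checks whether
 * nr_blocks contiguous blocks are free from there (possibly spanning
 * several words), and either claims them all or resumes scanning just
 * past the failed candidate.
 */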

static size_t size_to_alloc_size(size_t size)
{
	size_t alloc_size = size + sizeof(struct block_hdr);

	/*
	 * Round to int alignment, so that the postred pointer will
	 * be naturally aligned as well.
	 */
#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
	alloc_size = (alloc_size + int_mask) & ~int_mask;
#endif

	return alloc_size;
}

static void *smalloc_pool(struct pool *pool, size_t size)
{
	size_t alloc_size = size_to_alloc_size(size);
	void *ptr;

	ptr = __smalloc_pool(pool, alloc_size);
	if (ptr) {
		struct block_hdr *hdr = ptr;

		hdr->size = alloc_size;
		fill_redzone(hdr);

		ptr += sizeof(*hdr);
		memset(ptr, 0, size);
	}

	return ptr;
}
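
/*
 * Overhead example, assuming a typical 64-bit build with redzones
 * enabled: a 100-byte request becomes 100 + sizeof(struct block_hdr)
 * (16: 8-byte size plus 4-byte prered, padded) + 4 for the post
 * redzone = 120 bytes, already int-aligned, which size_to_blocks()
 * then rounds to four 32-byte blocks.
 */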

static void smalloc_print_bitmap(struct pool *pool)
{
	size_t nr_blocks = pool->nr_blocks;
	unsigned int *bitmap = pool->bitmap;
	unsigned int i, j;
	char *buffer;

	if (!enable_smalloc_debug)
		return;

	buffer = malloc(SMALLOC_BPI + 1);
	if (!buffer)
		return;
	buffer[SMALLOC_BPI] = '\0';

	for (i = 0; i < nr_blocks; i++) {
		unsigned int line = bitmap[i];

		/* skip completely full lines */
		if (line == -1U)
			continue;

		for (j = 0; j < SMALLOC_BPI; j++)
			if ((1 << j) & line)
				buffer[SMALLOC_BPI-1-j] = '1';
			else
				buffer[SMALLOC_BPI-1-j] = '0';

		log_err("smalloc: bitmap %5u, %s\n", i, buffer);
	}

	free(buffer);
}

void smalloc_debug(size_t size)
{
	unsigned int i;
	size_t alloc_size = size_to_alloc_size(size);
	size_t alloc_blocks;

	alloc_blocks = size_to_blocks(alloc_size);

	if (size)
		log_err("smalloc: size = %lu, alloc_size = %lu, blocks = %lu\n",
			(unsigned long) size, (unsigned long) alloc_size,
			(unsigned long) alloc_blocks);
	for (i = 0; i < nr_pools; i++) {
		log_err("smalloc: pool %u, free/total blocks %u/%u\n", i,
			(unsigned int) (mp[i].free_blocks),
			(unsigned int) (mp[i].nr_blocks*sizeof(unsigned int)*8));
		if (size && mp[i].free_blocks >= alloc_blocks) {
			void *ptr = smalloc_pool(&mp[i], size);
			if (ptr) {
				sfree(ptr);
				last_pool = i;
				log_err("smalloc: smalloc_pool %u succeeded\n", i);
			} else {
				log_err("smalloc: smalloc_pool %u failed\n", i);
				log_err("smalloc: next_non_full=%u, nr_blocks=%u\n",
					(unsigned int) mp[i].next_non_full, (unsigned int) mp[i].nr_blocks);
				smalloc_print_bitmap(&mp[i]);
			}
		}
	}
}

void *smalloc(size_t size)
{
	unsigned int i, end_pool;

	if (size != (unsigned int) size)
		return NULL;

	i = last_pool;
	end_pool = nr_pools;

	do {
		for (; i < end_pool; i++) {
			void *ptr = smalloc_pool(&mp[i], size);

			if (ptr) {
				last_pool = i;
				return ptr;
			}
		}
		if (last_pool) {
			end_pool = last_pool;
			last_pool = i = 0;
			continue;
		}

		break;
	} while (1);

	log_err("smalloc: OOM. Consider using --alloc-size to increase the "
		"shared memory available.\n");
	smalloc_debug(size);
	return NULL;
}
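
/*
 * Scan order note: smalloc() starts at the pool that last satisfied an
 * allocation and only wraps around to the earlier pools if the later
 * ones are full. last_pool is merely a starting hint, so a stale value
 * costs at most extra scanning; each pool probe is serialized by that
 * pool's semaphore.
 */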

void *scalloc(size_t nmemb, size_t size)
{
	return smalloc(nmemb * size);
}

char *smalloc_strdup(const char *str)
{
	char *ptr = NULL;

	ptr = smalloc(strlen(str) + 1);
	if (ptr)
		strcpy(ptr, str);
	return ptr;
}
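
/*
 * Note: smalloc_pool() already zeroes every allocation via memset(), so
 * scalloc() needs no explicit clearing; callers relying on calloc()
 * semantics get them for free.
 */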