[fio.git] / smalloc.c
/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
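/*
 * Usage sketch (illustrative, not part of upstream smalloc.c): callers
 * run sinit() once, before any forking, so that later smalloc()/sfree()
 * calls hand out memory visible to every process. "struct shared_obj"
 * below is a hypothetical caller-side type; smalloc() returns zeroed
 * memory.
 *
 *	sinit();
 *	struct shared_obj *o = smalloc(sizeof(*o));
 *	if (!o)
 *		log_err("smalloc: out of shared memory\n");
 *	...
 *	sfree(o);
 *	scleanup();
 */
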
#include <sys/mman.h>
#include <assert.h>
#include <string.h>

#include "fio.h"
#include "fio_sem.h"
#include "os/os.h"
#include "smalloc.h"
#include "log.h"

#define SMALLOC_REDZONE		/* define to detect memory corruption */

#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)
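
/*
 * Worked example of the geometry above (descriptive note, assuming a
 * 32-bit unsigned int): one bitmap bit covers SMALLOC_BPB = 32 bytes,
 * one bitmap word holds SMALLOC_BPI = 32 bits, so a single bitmap word
 * (one "line") describes SMALLOC_BPL = 32 * 32 = 1024 bytes of pool
 * memory.
 */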

#define INITIAL_SIZE	16*1024*1024	/* new pool size */
#define INITIAL_POOLS	8		/* maximum number of pools to setup */

#define MAX_POOLS	16

#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U

unsigned int smalloc_pool_size = INITIAL_SIZE;
#ifdef SMALLOC_REDZONE
static const int int_mask = sizeof(int) - 1;
#endif

struct pool {
	struct fio_sem *lock;		/* protects this pool */
	void *map;			/* map of blocks */
	unsigned int *bitmap;		/* blocks free/busy map */
	size_t free_blocks;		/* free blocks */
	size_t nr_blocks;		/* total blocks */
	size_t next_non_full;
	size_t mmap_size;
};

struct block_hdr {
	size_t size;
#ifdef SMALLOC_REDZONE
	unsigned int prered;
#endif
};
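
/*
 * Layout of one allocation inside a pool (descriptive note, added for
 * clarity): the pointer handed back by smalloc() sits right after a
 * struct block_hdr, and with SMALLOC_REDZONE enabled an unsigned int
 * guard word is written at the int-aligned tail of the allocation:
 *
 *	[ block_hdr | user data .................... | postred ]
 *	            ^- pointer returned to the caller
 */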

/*
 * When false (the default), this suppresses the potentially voluminous
 * bitmap printout that smalloc_debug() would otherwise emit when smalloc
 * hits an out-of-memory error.
 */
static const bool enable_smalloc_debug = false;

static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;

static inline int ptr_valid(struct pool *pool, void *ptr)
{
	unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

	return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}

static inline size_t size_to_blocks(size_t size)
{
	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
}
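
/*
 * Example (descriptive note): size_to_blocks() rounds up to whole
 * 32-byte blocks, so a 100-byte request needs (100 + 31) / 32 = 4
 * blocks, i.e. 128 bytes of pool space.
 */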

static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))
{
	while (nr_blocks) {
		unsigned int this_blocks, mask;
		unsigned int *map;

		if (pool_idx >= pool->nr_blocks)
			return 0;

		map = &pool->bitmap[pool_idx];

		this_blocks = nr_blocks;
		if (this_blocks + idx > SMALLOC_BPI) {
			this_blocks = SMALLOC_BPI - idx;
			idx = SMALLOC_BPI - this_blocks;
		}

		if (this_blocks == SMALLOC_BPI)
			mask = -1U;
		else
			mask = ((1U << this_blocks) - 1) << idx;

		if (!func(map, mask))
			return 0;

		nr_blocks -= this_blocks;
		idx = 0;
		pool_idx++;
	}

	return 1;
}
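
/*
 * Worked example for blocks_iter() (descriptive note, 32-bit bitmap
 * words assumed): operating on 5 blocks starting at idx 30 of word N
 * first builds mask ((1U << 2) - 1) << 30 for the 2 blocks that still
 * fit in word N, then continues with the remaining 3 blocks at idx 0
 * of word N + 1.
 */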

static int mask_cmp(unsigned int *map, unsigned int mask)
{
	return !(*map & mask);
}

static int mask_clear(unsigned int *map, unsigned int mask)
{
	assert((*map & mask) == mask);
	*map &= ~mask;
	return 1;
}

static int mask_set(unsigned int *map, unsigned int mask)
{
	assert(!(*map & mask));
	*map |= mask;
	return 1;
}

static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}

static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}

static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}

static int find_next_zero(int word, int start)
{
	assert(word != -1U);
	word >>= start;
	return ffz(word) + start;
}
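
/*
 * Example (descriptive note): with word 0b10111 and start 0, ffz()
 * skips the three set low bits and find_next_zero() returns 3. With
 * start 4 the word is first shifted down to 0b1, ffz() returns 1, and
 * the result is 1 + 4 = 5: the next clear bit at or above the start.
 */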

static bool add_pool(struct pool *pool, unsigned int alloc_size)
{
	int bitmap_blocks;
	int mmap_flags;
	void *ptr;

	if (nr_pools == MAX_POOLS)
		return false;

#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
#endif
	alloc_size += sizeof(struct block_hdr);
	if (alloc_size < INITIAL_SIZE)
		alloc_size = INITIAL_SIZE;

	/* round up to nearest full number of blocks */
	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
	bitmap_blocks = alloc_size / SMALLOC_BPL;
	alloc_size += bitmap_blocks * sizeof(unsigned int);
	pool->mmap_size = alloc_size;

	pool->nr_blocks = bitmap_blocks;
	pool->free_blocks = bitmap_blocks * SMALLOC_BPB;

	mmap_flags = OS_MAP_ANON;
#ifdef CONFIG_ESX
	mmap_flags |= MAP_PRIVATE;
#else
	mmap_flags |= MAP_SHARED;
#endif
	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, mmap_flags, -1, 0);

	if (ptr == MAP_FAILED)
		goto out_fail;

	pool->map = ptr;
	pool->bitmap = (unsigned int *)((char *) ptr + (pool->nr_blocks * SMALLOC_BPL));
	memset(pool->bitmap, 0, bitmap_blocks * sizeof(unsigned int));

	pool->lock = fio_sem_init(FIO_SEM_UNLOCKED);
	if (!pool->lock)
		goto out_fail;

	nr_pools++;
	return true;
out_fail:
	log_err("smalloc: failed adding pool\n");
	if (pool->map)
		munmap(pool->map, pool->mmap_size);
	return false;
}
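
/*
 * Worked example for add_pool() (descriptive note): for the default
 * 16 MiB request, the block_hdr/redzone overhead pushes alloc_size just
 * past 16 MiB, so the SMALLOC_BPL rounding takes it to 16 MiB + 1 KiB,
 * i.e. bitmap_blocks = 16385 lines of 1024 bytes each. The mapping then
 * holds that block storage first, with the 16385-word bitmap appended
 * at map + nr_blocks * SMALLOC_BPL.
 */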

void sinit(void)
{
	bool ret;
	int i;

	for (i = 0; i < INITIAL_POOLS; i++) {
		ret = add_pool(&mp[nr_pools], smalloc_pool_size);
		if (!ret)
			break;
	}

	/*
	 * If we added at least one pool, we should be OK for most
	 * cases.
	 */
	assert(i);
}

static void cleanup_pool(struct pool *pool)
{
	/*
	 * Unmap the anonymous shared mapping backing this pool; there is
	 * no file-backed store left to clean up.
	 */
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_sem_remove(pool->lock);
}

void scleanup(void)
{
	unsigned int i;

	for (i = 0; i < nr_pools; i++)
		cleanup_pool(&mp[i]);
}

#ifdef SMALLOC_REDZONE
static void *postred_ptr(struct block_hdr *hdr)
{
	uintptr_t ptr;

	ptr = (uintptr_t) hdr + hdr->size - sizeof(unsigned int);
	ptr = (uintptr_t) PTR_ALIGN(ptr, int_mask);

	return (void *) ptr;
}

static void fill_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	if (hdr->prered != SMALLOC_PRE_RED) {
		log_err("smalloc pre redzone destroyed!\n"
			" ptr=%p, prered=%x, expected %x\n",
			hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred != SMALLOC_POST_RED) {
		log_err("smalloc post redzone destroyed!\n"
			" ptr=%p, postred=%x, expected %x\n",
			hdr, *postred, SMALLOC_POST_RED);
		assert(0);
	}
}
#else
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif
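
/*
 * Illustration (descriptive note, hypothetical caller code): the
 * redzones catch small overruns at free time. A one-byte overrun like
 *
 *	char *p = smalloc(16);
 *	p[16] = 'x';	// typically lands on the postred guard word
 *	sfree(p);	// sfree_check_redzone() asserts here
 *
 * trips the SMALLOC_POST_RED check, while scribbling in front of p
 * would destroy hdr->prered instead.
 */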

static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	fio_sem_down(pool->lock);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	if (i < pool->next_non_full)
		pool->next_non_full = i;
	pool->free_blocks += size_to_blocks(hdr->size);
	fio_sem_up(pool->lock);
}
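
/*
 * Example of the offset math above (descriptive note): a block header
 * at pool offset 5000 gives i = 5000 / 1024 = 4 (bitmap word) and
 * idx = (5000 % 1024) / 32 = 28 (bit within that word), so the
 * allocation's blocks are cleared starting at word 4, bit 28.
 */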

void sfree(void *ptr)
{
	struct pool *pool = NULL;
	unsigned int i;

	if (!ptr)
		return;

	for (i = 0; i < nr_pools; i++) {
		if (ptr_valid(&mp[i], ptr)) {
			pool = &mp[i];
			break;
		}
	}

	if (pool) {
		sfree_pool(pool, ptr);
		return;
	}

	log_err("smalloc: ptr %p not from smalloc pool\n", ptr);
}

static void *__smalloc_pool(struct pool *pool, size_t size)
{
	size_t nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	fio_sem_down(pool->lock);

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)
		goto fail;

	i = pool->next_non_full;
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		if (pool->bitmap[i] == -1U) {
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	fio_sem_up(pool->lock);
	return ret;
}
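
/*
 * Search sketch for __smalloc_pool() (descriptive note): the scan is a
 * first fit over bitmap words starting at next_non_full. For a 3-block
 * request with bitmap word 0 holding 0x0000000f, find_next_zero()
 * returns bit 4, blocks_free() confirms bits 4-6 are clear, set_blocks()
 * marks them, and the returned offset is 0 * 1024 + 4 * 32 = 128 bytes
 * into the pool (where the block_hdr, not the user pointer, will live).
 */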

static size_t size_to_alloc_size(size_t size)
{
	size_t alloc_size = size + sizeof(struct block_hdr);

	/*
	 * Round to int alignment, so that the postred pointer will
	 * be naturally aligned as well.
	 */
#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
	alloc_size = (alloc_size + int_mask) & ~int_mask;
#endif

	return alloc_size;
}
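
/*
 * Worked example (descriptive note, assuming an LP64 build where
 * sizeof(struct block_hdr) is 16 and sizeof(int) is 4): a 10-byte
 * request becomes 10 + 16 = 26 bytes, plus 4 for the post redzone = 30,
 * rounded up to int alignment = 32 bytes, which size_to_blocks() then
 * maps to a single 32-byte block.
 */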

static void *smalloc_pool(struct pool *pool, size_t size)
{
	size_t alloc_size = size_to_alloc_size(size);
	void *ptr;

	ptr = __smalloc_pool(pool, alloc_size);
	if (ptr) {
		struct block_hdr *hdr = ptr;

		hdr->size = alloc_size;
		fill_redzone(hdr);

		ptr += sizeof(*hdr);
		memset(ptr, 0, size);
	}

	return ptr;
}

static void smalloc_print_bitmap(struct pool *pool)
{
	size_t nr_blocks = pool->nr_blocks;
	unsigned int *bitmap = pool->bitmap;
	unsigned int i, j;
	char *buffer;

	if (!enable_smalloc_debug)
		return;

	buffer = malloc(SMALLOC_BPI + 1);
	if (!buffer)
		return;
	buffer[SMALLOC_BPI] = '\0';

	for (i = 0; i < nr_blocks; i++) {
		unsigned int line = bitmap[i];

		/* skip completely full lines */
		if (line == -1U)
			continue;

		for (j = 0; j < SMALLOC_BPI; j++)
			if ((1 << j) & line)
				buffer[SMALLOC_BPI-1-j] = '1';
			else
				buffer[SMALLOC_BPI-1-j] = '0';

		log_err("smalloc: bitmap %5u, %s\n", i, buffer);
	}

	free(buffer);
}

void smalloc_debug(size_t size)
{
	unsigned int i;
	size_t alloc_size = size_to_alloc_size(size);
	size_t alloc_blocks;

	alloc_blocks = size_to_blocks(alloc_size);

	if (size)
		log_err("smalloc: size = %lu, alloc_size = %lu, blocks = %lu\n",
			(unsigned long) size, (unsigned long) alloc_size,
			(unsigned long) alloc_blocks);
	for (i = 0; i < nr_pools; i++) {
		log_err("smalloc: pool %u, free/total blocks %u/%u\n", i,
			(unsigned int) (mp[i].free_blocks),
			(unsigned int) (mp[i].nr_blocks*sizeof(unsigned int)*8));
		if (size && mp[i].free_blocks >= alloc_blocks) {
			void *ptr = smalloc_pool(&mp[i], size);
			if (ptr) {
				sfree(ptr);
				last_pool = i;
				log_err("smalloc: smalloc_pool %u succeeded\n", i);
			} else {
				log_err("smalloc: smalloc_pool %u failed\n", i);
				log_err("smalloc: next_non_full=%u, nr_blocks=%u\n",
					(unsigned int) mp[i].next_non_full, (unsigned int) mp[i].nr_blocks);
				smalloc_print_bitmap(&mp[i]);
			}
		}
	}
}

void *smalloc(size_t size)
{
	unsigned int i, end_pool;

	if (size != (unsigned int) size)
		return NULL;

	i = last_pool;
	end_pool = nr_pools;

	do {
		for (; i < end_pool; i++) {
			void *ptr = smalloc_pool(&mp[i], size);

			if (ptr) {
				last_pool = i;
				return ptr;
			}
		}
		if (last_pool) {
			end_pool = last_pool;
			last_pool = i = 0;
			continue;
		}

		break;
	} while (1);

	log_err("smalloc: OOM. Consider using --alloc-size to increase the "
		"shared memory available.\n");
	smalloc_debug(size);
	return NULL;
}
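
/*
 * Search order note (descriptive, added for clarity): with 8 pools and
 * last_pool = 5, smalloc() first tries pools 5, 6 and 7; if they all
 * fail it wraps and tries pools 0 through 4 before reporting OOM, so
 * repeated allocations tend to stay in the most recently successful
 * pool.
 */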

void *scalloc(size_t nmemb, size_t size)
{
	return smalloc(nmemb * size);
}

char *smalloc_strdup(const char *str)
{
	char *ptr = NULL;

	ptr = smalloc(strlen(str) + 1);
	if (ptr)
		strcpy(ptr, str);
	return ptr;
}