/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "mutex.h"
#include "arch/arch.h"
#include "os/os.h"
#include "smalloc.h"

#define SMALLOC_REDZONE		/* define to detect memory corruption */

#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)
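
/*
 * Example: with 4-byte ints, one bitmap word holds SMALLOC_BPI = 32 bits,
 * each bit covers a SMALLOC_BPB = 32 byte block, so every bitmap word
 * tracks SMALLOC_BPL = 32 * 32 = 1024 bytes of pool space.
 */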

#define INITIAL_SIZE	(8192 * 1024)	/* new pool size */
#define MAX_POOLS	128		/* maximum number of pools to setup */

#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U

unsigned int smalloc_pool_size = INITIAL_SIZE;
static const int int_mask = sizeof(int) - 1;

struct pool {
	struct fio_mutex *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	size_t free_blocks;			/* free blocks */
	size_t nr_blocks;			/* total blocks */
	size_t next_non_full;			/* first word that may have room */
	size_t mmap_size;			/* mapped size, incl. bitmap */
};

struct block_hdr {
	size_t size;
#ifdef SMALLOC_REDZONE
	unsigned int prered;
#endif
};

static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;
static struct fio_rwlock *lock;

static inline void pool_lock(struct pool *pool)
{
	fio_mutex_down(pool->lock);
}

static inline void pool_unlock(struct pool *pool)
{
	fio_mutex_up(pool->lock);
}

static inline void global_read_lock(void)
{
	fio_rwlock_read(lock);
}

static inline void global_read_unlock(void)
{
	fio_rwlock_unlock(lock);
}

static inline void global_write_lock(void)
{
	fio_rwlock_write(lock);
}

static inline void global_write_unlock(void)
{
	fio_rwlock_unlock(lock);
}

static inline int ptr_valid(struct pool *pool, void *ptr)
{
	unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

	return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}

static inline size_t size_to_blocks(size_t size)
{
	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
}
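
/*
 * Example: size_to_blocks(100) = (100 + 31) / 32 = 4 blocks, so a 100
 * byte request reserves 4 * 32 = 128 bytes worth of bitmap bits.
 */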

static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))
{
	while (nr_blocks) {
		unsigned int this_blocks, mask;
		unsigned int *map;

		if (pool_idx >= pool->nr_blocks)
			return 0;

		map = &pool->bitmap[pool_idx];

		/* clamp the run to what fits in this bitmap word */
		this_blocks = nr_blocks;
		if (this_blocks + idx > SMALLOC_BPI) {
			this_blocks = SMALLOC_BPI - idx;
			idx = SMALLOC_BPI - this_blocks;
		}

		if (this_blocks == SMALLOC_BPI)
			mask = -1U;
		else
			mask = ((1U << this_blocks) - 1) << idx;

		if (!func(map, mask))
			return 0;

		nr_blocks -= this_blocks;
		idx = 0;
		pool_idx++;
	}

	return 1;
}
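
/*
 * Mask example: a run starting at bit idx = 3 with this_blocks = 4 gives
 * mask = ((1U << 4) - 1) << 3 = 0x78, i.e. bits 3..6 of the bitmap word.
 */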

static int mask_cmp(unsigned int *map, unsigned int mask)
{
	return !(*map & mask);
}

static int mask_clear(unsigned int *map, unsigned int mask)
{
	assert((*map & mask) == mask);
	*map &= ~mask;
	return 1;
}

static int mask_set(unsigned int *map, unsigned int mask)
{
	assert(!(*map & mask));
	*map |= mask;
	return 1;
}

static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}

static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}

static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}

static int find_next_zero(int word, int start)
{
	assert(word != -1U);
	word >>= start;
	return ffz(word) + start;
}
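
/*
 * ffz() (find first zero bit) comes from arch/arch.h. Example:
 * find_next_zero(0xf, 2) shifts the word to 0x3, ffz(0x3) = 2, so it
 * returns 2 + 2 = 4, the first free bit at or above the start position.
 */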

static int add_pool(struct pool *pool, unsigned int alloc_size)
{
	int bitmap_blocks;
	void *ptr;

#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
#endif
	alloc_size += sizeof(struct block_hdr);
	if (alloc_size < INITIAL_SIZE)
		alloc_size = INITIAL_SIZE;

	/* round up to nearest full number of blocks */
	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
	bitmap_blocks = alloc_size / SMALLOC_BPL;
	alloc_size += bitmap_blocks * sizeof(unsigned int);
	pool->mmap_size = alloc_size;

	pool->nr_blocks = bitmap_blocks;
	pool->free_blocks = bitmap_blocks * SMALLOC_BPB;
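
	/*
	 * Worked example for the default 8 MiB pool, assuming 4-byte ints:
	 * alloc_size is already a multiple of SMALLOC_BPL (1024), so
	 * bitmap_blocks = 8388608 / 1024 = 8192 words, the bitmap adds
	 * 8192 * 4 = 32 KiB to the mapping, and free_blocks starts out
	 * at 8192 * 32 = 262144 blocks.
	 */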

	/*
	 * An anonymous, shared mapping lets the allocations be used
	 * across fork() without any file backing.
	 */
	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE,
			MAP_SHARED | OS_MAP_ANON, -1, 0);
	if (ptr == MAP_FAILED)
		goto out_fail;

	memset(ptr, 0, alloc_size);
	pool->map = ptr;
	pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);

	pool->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
	if (!pool->lock)
		goto out_fail;

	nr_pools++;
	return 0;
out_fail:
	fprintf(stderr, "smalloc: failed adding pool\n");
	if (pool->map)
		munmap(pool->map, pool->mmap_size);
	return 1;
}

void sinit(void)
{
	int ret;

	lock = fio_rwlock_init();
	ret = add_pool(&mp[0], INITIAL_SIZE);
	assert(!ret);
}

static void cleanup_pool(struct pool *pool)
{
	/*
	 * The pool is backed by an anonymous shared mapping, so unmapping
	 * it releases everything; there is no file to clean up.
	 */
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_mutex_remove(pool->lock);
}

void scleanup(void)
{
	unsigned int i;

	for (i = 0; i < nr_pools; i++)
		cleanup_pool(&mp[i]);

	if (lock)
		fio_rwlock_remove(lock);
}

#ifdef SMALLOC_REDZONE
static void *postred_ptr(struct block_hdr *hdr)
{
	uintptr_t ptr;

	ptr = (uintptr_t) hdr + hdr->size - sizeof(unsigned int);
	ptr = (ptr + int_mask) & ~int_mask;

	return (void *) ptr;
}

static void fill_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
}
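
/*
 * Block layout with redzones enabled (illustrative):
 *
 *	+----------------------------+ <- block start, from __smalloc_pool()
 *	| block_hdr { size, prered } |
 *	+----------------------------+ <- pointer handed to the caller
 *	| user data                  |
 *	+----------------------------+
 *	| postred (int aligned)      | <- last sizeof(unsigned int) bytes
 *	+----------------------------+
 */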

static void sfree_check_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	if (hdr->prered != SMALLOC_PRE_RED) {
		fprintf(stderr, "smalloc pre redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, prered=%x, expected %x\n",
				hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred != SMALLOC_POST_RED) {
		fprintf(stderr, "smalloc post redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, postred=%x, expected %x\n",
				hdr, *postred, SMALLOC_POST_RED);
		assert(0);
	}
}
#else
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif

static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	/* step back to the block header in front of the user pointer */
	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;
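
	/*
	 * Example: offset 5000 lands in bitmap word i = 5000 / 1024 = 4,
	 * at bit idx = (5000 % 1024) / 32 = 28 within that word.
	 */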

	pool_lock(pool);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	if (i < pool->next_non_full)
		pool->next_non_full = i;
	pool->free_blocks += size_to_blocks(hdr->size);
	pool_unlock(pool);
}

void sfree(void *ptr)
{
	struct pool *pool = NULL;
	unsigned int i;

	if (!ptr)
		return;

	global_read_lock();

	for (i = 0; i < nr_pools; i++) {
		if (ptr_valid(&mp[i], ptr)) {
			pool = &mp[i];
			break;
		}
	}

	global_read_unlock();

	assert(pool);
	sfree_pool(pool, ptr);
}

static void *__smalloc_pool(struct pool *pool, size_t size)
{
	size_t nr_blocks;
	unsigned int i, offset, last_idx;
	void *ret = NULL;

	pool_lock(pool);

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)
		goto fail;

	i = pool->next_non_full;
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		/* word fully allocated, remember and skip it */
		if (pool->bitmap[i] == -1U) {
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			/* run too short, resume the scan past it */
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	pool_unlock(pool);
	return ret;
}
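
/*
 * smalloc_pool() wraps the raw first-fit block allocator above: it grows
 * the request to make room for the block header (and redzones, when
 * enabled), then zeroes the user-visible part of the allocation.
 */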

static void *smalloc_pool(struct pool *pool, size_t size)
{
	size_t alloc_size = size + sizeof(struct block_hdr);
	void *ptr;

	/*
	 * Round to int alignment, so that the postred pointer will
	 * be naturally aligned as well.
	 */
#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
	alloc_size = (alloc_size + int_mask) & ~int_mask;
#endif

	ptr = __smalloc_pool(pool, alloc_size);
	if (ptr) {
		struct block_hdr *hdr = ptr;

		hdr->size = alloc_size;
		ptr += sizeof(*hdr);

		fill_redzone(hdr);

		memset(ptr, 0, size);
	}

	return ptr;
}
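
/*
 * Example, assuming an LP64 target (8-byte size_t, 4-byte int) with
 * SMALLOC_REDZONE enabled: smalloc(100) becomes 100 + 16 (header) + 4
 * (postred) = 120 bytes, already int aligned, which size_to_blocks()
 * rounds to 4 blocks, i.e. 128 bytes of pool space.
 */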

void *smalloc(size_t size)
{
	unsigned int i;

	if (size != (unsigned int) size)
		return NULL;

	global_write_lock();
	i = last_pool;

	do {
		for (; i < nr_pools; i++) {
			void *ptr = smalloc_pool(&mp[i], size);

			if (ptr) {
				last_pool = i;
				global_write_unlock();
				return ptr;
			}
		}
		if (last_pool) {
			/* wrap around and retry the pools we skipped */
			last_pool = 0;
			i = 0;
			continue;
		}

		if (nr_pools + 1 > MAX_POOLS)
			break;

		/* all pools full: add a new one sized for this request */
		if (add_pool(&mp[nr_pools], size))
			break;
	} while (1);

	global_write_unlock();
	return NULL;
}

char *smalloc_strdup(const char *str)
{
	char *ptr;

	ptr = smalloc(strlen(str) + 1);
	if (ptr)
		strcpy(ptr, str);
	return ptr;
}
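
/*
 * Illustrative usage sketch (not part of this file): a process calls
 * sinit() before forking, and allocations are then visible to both
 * parent and child through the MAP_SHARED backing.
 */
#if 0
	sinit();

	unsigned int *counter = smalloc(sizeof(*counter));
	char *name = smalloc_strdup("shared-name");

	if (fork() == 0) {
		(*counter)++;		/* parent sees this update */
		_exit(0);
	}

	sfree(name);
	sfree(counter);
	scleanup();
#endif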