/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
#include <sys/types.h>

#include "arch/arch.h"

#define MP_SAFE			/* define to make thread safe */
#define SMALLOC_REDZONE		/* define to detect memory corruption */

#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)
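/*
 * Bitmap geometry (sketch): each bitmap bit tracks SMALLOC_BPB bytes of
 * pool space. Assuming a 32-bit unsigned int, one bitmap word covers
 * SMALLOC_BPI = 32 blocks, i.e. SMALLOC_BPL = 32 * 32 = 1024 bytes.
 */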
#define INITIAL_SIZE	1024*1024	/* new pool size */
#define MAX_POOLS	128		/* maximum number of pools to setup */

#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U
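/*
 * Redzone guard words: with SMALLOC_REDZONE defined, the "pre" value is
 * stored in the block header and the "post" value just past the user data;
 * both are verified when a block is freed (see sfree_check_redzone()).
 */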
unsigned int smalloc_pool_size = INITIAL_SIZE;

	struct fio_mutex *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	unsigned int free_blocks;		/* free blocks */
	unsigned int nr_blocks;			/* total blocks */
	unsigned int next_non_full;		/* start index for free block search */
	int fd;					/* memory backing fd */
	char file[PATH_MAX];			/* filename for fd */
	unsigned int mmap_size;			/* size of the backing mapping */

#ifdef SMALLOC_REDZONE

static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;
static struct fio_mutex *lock;
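/*
 * Locking: each pool has its own mutex protecting its bitmap and counters,
 * while the global rwlock above guards the pool array itself; sfree() takes
 * it shared to look up the owning pool, and smalloc() takes it exclusive
 * since it may have to add a new pool.
 */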
static inline void pool_lock(struct pool *pool)
	fio_mutex_down(pool->lock);

static inline void pool_unlock(struct pool *pool)
	fio_mutex_up(pool->lock);

static inline void global_read_lock(void)
	fio_mutex_down_read(lock);

static inline void global_read_unlock(void)
	fio_mutex_up_read(lock);

static inline void global_write_lock(void)
	fio_mutex_down_write(lock);

static inline void global_write_unlock(void)
	fio_mutex_up_write(lock);

static inline int ptr_valid(struct pool *pool, void *ptr)
	unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

	return (ptr >= pool->map) && (ptr < pool->map + pool_size);

static inline unsigned int size_to_blocks(unsigned int size)
	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
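/*
 * Walk nr_blocks bitmap bits starting at word pool_idx, bit idx, applying
 * func() to each affected bitmap word with a mask of the bits that fall
 * inside the range; used by the free/set/clear helpers below.
 */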
static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))

	unsigned int this_blocks, mask;

	if (pool_idx >= pool->nr_blocks)

	map = &pool->bitmap[pool_idx];

	this_blocks = nr_blocks;
	if (this_blocks + idx > SMALLOC_BPI) {
		this_blocks = SMALLOC_BPI - idx;
		idx = SMALLOC_BPI - this_blocks;

	if (this_blocks == SMALLOC_BPI)

	mask = ((1U << this_blocks) - 1) << idx;

	if (!func(map, mask))

	nr_blocks -= this_blocks;

static int mask_cmp(unsigned int *map, unsigned int mask)
	return !(*map & mask);

static int mask_clear(unsigned int *map, unsigned int mask)
	assert((*map & mask) == mask);

static int mask_set(unsigned int *map, unsigned int mask)
	assert(!(*map & mask));

static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks)
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);

static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks)
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);

static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, unsigned int nr_blocks)
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
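/*
 * Return the index of the first zero bit in 'word' strictly after 'start'.
 * ffz() (find first zero bit) is expected to come from the arch headers
 * included above; callers skip words that are completely set, so there is
 * always a zero bit to find.
 */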
static int find_next_zero(int word, int start)
	word >>= (start + 1);
	return ffz(word) + start + 1;
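/*
 * Set up a new pool: the memory is backed by a temporary file created with
 * mkstemp() and mapped MAP_SHARED, so allocations stay visible to processes
 * forked after the allocation. The requested size is rounded up to whole
 * bitmap lines (SMALLOC_BPL), and the bitmap itself is placed at the end of
 * the mapping.
 */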
static int add_pool(struct pool *pool, unsigned int alloc_size)
	int fd, bitmap_blocks;

	strcpy(pool->file, "/tmp/.fio_smalloc.XXXXXX");
	fd = mkstemp(pool->file);

#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);

	alloc_size += sizeof(struct block_hdr);
	if (alloc_size < INITIAL_SIZE)
		alloc_size = INITIAL_SIZE;

	/* round up to nearest full number of blocks */
	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
	bitmap_blocks = alloc_size / SMALLOC_BPL;
	alloc_size += bitmap_blocks * sizeof(unsigned int);
	pool->mmap_size = alloc_size;

	pool->nr_blocks = bitmap_blocks;
	pool->free_blocks = bitmap_blocks * SMALLOC_BPB;

	if (ftruncate(fd, alloc_size) < 0)

	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (ptr == MAP_FAILED)

	memset(ptr, 0, alloc_size);

	pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);

	pool->lock = fio_mutex_init(1);

	/*
	 * Unlink the pool file now. It won't actually be removed until the
	 * fd is closed, which happens both on cleanup and on an unexpected
	 * quit. This way we don't leave temp files around in case of a
	 * crash.
	 */

	fprintf(stderr, "smalloc: failed adding pool\n");
	munmap(pool->map, pool->mmap_size);

	lock = fio_mutex_rw_init();

	ret = add_pool(&mp[0], INITIAL_SIZE);

static void cleanup_pool(struct pool *pool)
	/*
	 * This will also remove the temporary file we used as a backing
	 * store, since it was already unlinked.
	 */
	munmap(pool->map, pool->mmap_size);

	fio_mutex_remove(pool->lock);

	for (i = 0; i < nr_pools; i++)
		cleanup_pool(&mp[i]);

	fio_mutex_remove(lock);
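/*
 * Redzone layout: hdr->size covers the header, the user data and the
 * trailing guard word, so the post redzone occupies the last
 * sizeof(unsigned int) bytes of the allocation:
 *
 *	[ block_hdr (incl. prered) ][ user data ][ postred ]
 */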
#ifdef SMALLOC_REDZONE
static void fill_redzone(struct block_hdr *hdr)
	unsigned int *postred = (void *) hdr + hdr->size - sizeof(unsigned int);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;

static void sfree_check_redzone(struct block_hdr *hdr)
	unsigned int *postred = (void *) hdr + hdr->size - sizeof(unsigned int);

	if (hdr->prered != SMALLOC_PRE_RED) {
		fprintf(stderr, "smalloc pre redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, prered=%x, expected %x\n",
				hdr, hdr->prered, SMALLOC_PRE_RED);

	if (*postred != SMALLOC_POST_RED) {
		fprintf(stderr, "smalloc post redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, postred=%x, expected %x\n",
				hdr, *postred, SMALLOC_POST_RED);

static void fill_redzone(struct block_hdr *hdr)

static void sfree_check_redzone(struct block_hdr *hdr)

static void sfree_pool(struct pool *pool, void *ptr)
	struct block_hdr *hdr;
	unsigned long offset;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	if (i < pool->next_non_full)
		pool->next_non_full = i;
	pool->free_blocks += size_to_blocks(hdr->size);
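/*
 * Public free path: find the owning pool by address range under the global
 * read lock, then let sfree_pool() clear the matching bitmap bits and give
 * the blocks back to that pool.
 */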
void sfree(void *ptr)
	struct pool *pool = NULL;

	for (i = 0; i < nr_pools; i++) {
		if (ptr_valid(&mp[i], ptr)) {

	global_read_unlock();

	sfree_pool(pool, ptr);
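/*
 * Core allocation: a first-fit scan of the bitmap starting at
 * next_non_full. Fully busy words (-1U) are skipped; otherwise we look for
 * the next zero bit and test whether a run of nr_blocks free blocks starts
 * there, advancing into the following word when it does not.
 */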
static void *__smalloc_pool(struct pool *pool, unsigned int size)
	unsigned int nr_blocks;
	unsigned int last_idx;

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)

	i = pool->next_non_full;

	while (i < pool->nr_blocks) {

		if (pool->bitmap[i] == -1U) {
			pool->next_non_full = i;

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			if (idx < SMALLOC_BPI)

			while (idx >= SMALLOC_BPI) {

		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;

	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;

static void *smalloc_pool(struct pool *pool, unsigned int size)
	unsigned int alloc_size = size + sizeof(struct block_hdr);

#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);

	ptr = __smalloc_pool(pool, alloc_size);
		struct block_hdr *hdr = ptr;

		hdr->size = alloc_size;

		memset(ptr, 0, size);
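/*
 * Public allocation path: try the existing pools first, and if they are all
 * too full, grow by adding another pool (up to MAX_POOLS) sized for the
 * request.
 */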
void *smalloc(unsigned int size)

	for (; i < nr_pools; i++) {
		void *ptr = smalloc_pool(&mp[i], size);

			global_write_unlock();

	if (nr_pools + 1 > MAX_POOLS)

	if (add_pool(&mp[nr_pools], size))

	global_write_unlock();
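/*
 * Convenience helper: duplicate a string into smalloc()'ed (and therefore
 * process-shared) memory; release it with sfree() like any other
 * allocation.
 */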
char *smalloc_strdup(const char *str)
	ptr = smalloc(strlen(str) + 1);