/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
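
/*
 * Illustrative usage sketch (not part of the allocator; the struct name
 * and string below are placeholders for whatever a caller would store):
 *
 *	struct foo *f = smalloc(sizeof(*f));		// shared, zero-filled
 *	char *name = smalloc_strdup("example");		// shared string copy
 *	...
 *	sfree(name);
 *	sfree(f);
 *
 * Because the pools live in a shared anonymous mapping, memory handed out
 * here stays visible to child processes forked after the pools are set up.
 */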

#include <sys/types.h>

#include "arch/arch.h"

#define SMALLOC_REDZONE		/* define to detect memory corruption */

#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)
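
/*
 * Worked example of the constants above, assuming a 4-byte unsigned int:
 * SMALLOC_BPI is 32 bits per bitmap word, so each bitmap word covers
 * SMALLOC_BPL = 32 * 32 = 1024 bytes of pool space, one bit per
 * SMALLOC_BPB-byte (32-byte) block.
 */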

#define INITIAL_SIZE	16*1024*1024	/* new pool size */
#define MAX_POOLS	8		/* maximum number of pools to setup */

#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U
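
/*
 * Redzone layout sketch, when SMALLOC_REDZONE is defined: each allocation
 * carries a header in front of the user data and a trailing guard word,
 * roughly
 *
 *	[ block_hdr: size, prered ][ user data ... ][ postred ]
 *
 * prered/postred are filled with the magic values above at allocation time
 * and re-checked on sfree(); a mismatch indicates a buffer under- or
 * overrun.
 */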

unsigned int smalloc_pool_size = INITIAL_SIZE;
#ifdef SMALLOC_REDZONE
static const int int_mask = sizeof(int) - 1;
#endif

struct pool {
	struct fio_mutex *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	size_t free_blocks;		/* free blocks */
	size_t nr_blocks;		/* total blocks */
	size_t next_non_full;		/* first bitmap word worth searching */
	size_t mmap_size;		/* size of the backing mapping */
};

struct block_hdr {
	size_t size;
#ifdef SMALLOC_REDZONE
	unsigned int prered;
#endif
};

static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;
static struct fio_rwlock *lock;

static inline void pool_lock(struct pool *pool)
{
	fio_mutex_down(pool->lock);
}

static inline void pool_unlock(struct pool *pool)
{
	fio_mutex_up(pool->lock);
}

static inline void global_read_lock(void)
{
	fio_rwlock_read(lock);
}

static inline void global_read_unlock(void)
{
	fio_rwlock_unlock(lock);
}

static inline void global_write_lock(void)
{
	fio_rwlock_write(lock);
}

static inline void global_write_unlock(void)
{
	fio_rwlock_unlock(lock);
}

static inline int ptr_valid(struct pool *pool, void *ptr)
{
	unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

	return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}

static inline size_t size_to_blocks(size_t size)
{
	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
}
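
/*
 * Example of the rounding above: size_to_blocks(1) == 1 and
 * size_to_blocks(33) == 2, i.e. every request is rounded up to whole
 * SMALLOC_BPB-byte blocks (on top of the header/redzone overhead added by
 * the callers further down).
 */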

static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))
{
	while (nr_blocks) {
		unsigned int this_blocks, mask;
		unsigned int *map;

		if (pool_idx >= pool->nr_blocks)
			return 0;

		map = &pool->bitmap[pool_idx];

		this_blocks = nr_blocks;
		if (this_blocks + idx > SMALLOC_BPI) {
			this_blocks = SMALLOC_BPI - idx;
			idx = SMALLOC_BPI - this_blocks;
		}

		if (this_blocks == SMALLOC_BPI)
			mask = -1U;	/* 1U << SMALLOC_BPI would be undefined */
		else
			mask = ((1U << this_blocks) - 1) << idx;
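
		/*
		 * Example: this_blocks = 3, idx = 5 yields
		 * mask = 0b111 << 5 = 0xe0, i.e. bits 5..7 of this bitmap
		 * word.
		 */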

		if (!func(map, mask))
			return 0;

		nr_blocks -= this_blocks;
		idx = 0;
		pool_idx++;
	}

	return 1;
}

static int mask_cmp(unsigned int *map, unsigned int mask)
{
	return !(*map & mask);
}

static int mask_clear(unsigned int *map, unsigned int mask)
{
	assert((*map & mask) == mask);
	*map &= ~mask;
	return 1;
}

static int mask_set(unsigned int *map, unsigned int mask)
{
	assert(!(*map & mask));
	*map |= mask;
	return 1;
}

static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}

static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}

static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}

static int find_next_zero(int word, int start)
{
	word >>= start;
	return ffz(word) + start;
}
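
/*
 * ffz() (presumably provided via the arch/arch.h include above) returns
 * the index of the first zero bit in its argument. Example:
 * find_next_zero(0x7, 0) == 3, and find_next_zero(0x7, 1) also resolves
 * to bit 3 after the shift and offset adjustment.
 */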

static int add_pool(struct pool *pool, unsigned int alloc_size)
{
	int bitmap_blocks;
	int mmap_flags;
	void *ptr;

#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
#endif
	alloc_size += sizeof(struct block_hdr);
	if (alloc_size < INITIAL_SIZE)
		alloc_size = INITIAL_SIZE;

	/* round up to nearest full number of blocks */
	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
	bitmap_blocks = alloc_size / SMALLOC_BPL;
	alloc_size += bitmap_blocks * sizeof(unsigned int);
	pool->mmap_size = alloc_size;

	pool->nr_blocks = bitmap_blocks;
	pool->free_blocks = bitmap_blocks * SMALLOC_BPB;
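
	/*
	 * Rough sizing example for the default INITIAL_SIZE of 16 MiB:
	 * once the small header/redzone addition has been rounded up, this
	 * is about 16 MiB / 1024 = 16384 bitmap words, i.e. roughly
	 * 16384 * 4 bytes = 64 KiB of bitmap (assuming a 4-byte unsigned
	 * int) tracking about 512K allocatable 32-byte blocks.
	 */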

	mmap_flags = OS_MAP_ANON;
#ifdef CONFIG_ESX
	mmap_flags |= MAP_PRIVATE;
#else
	mmap_flags |= MAP_SHARED;
#endif
	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, mmap_flags, -1, 0);
	if (ptr == MAP_FAILED)
		goto out_fail;

	memset(ptr, 0, alloc_size);
	pool->map = ptr;
	pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);

	pool->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
	if (!pool->lock)
		goto out_fail;

	nr_pools++;
	return 0;
out_fail:
	log_err("smalloc: failed adding pool\n");
	if (pool->map)
		munmap(pool->map, pool->mmap_size);
	return 1;
}

void sinit(void)
{
	int i, ret;

	lock = fio_rwlock_init();

	for (i = 0; i < MAX_POOLS; i++) {
		ret = add_pool(&mp[i], INITIAL_SIZE);
		if (ret)
			break;
	}

	/*
	 * If we added at least one pool, we should be OK for most
	 * cases.
	 */
	assert(i);
}

static void cleanup_pool(struct pool *pool)
{
	/*
	 * This will also remove the temporary file we used as a backing
	 * store, it was already unlinked
	 */
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_mutex_remove(pool->lock);
}

void scleanup(void)
{
	unsigned int i;

	for (i = 0; i < nr_pools; i++)
		cleanup_pool(&mp[i]);

	if (lock)
		fio_rwlock_remove(lock);
}

#ifdef SMALLOC_REDZONE
static void *postred_ptr(struct block_hdr *hdr)
{
	uintptr_t ptr;

	ptr = (uintptr_t) hdr + hdr->size - sizeof(unsigned int);
	ptr = (ptr + int_mask) & ~int_mask;

	return (void *) ptr;
}

static void fill_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	if (hdr->prered != SMALLOC_PRE_RED) {
		log_err("smalloc pre redzone destroyed!\n"
			" ptr=%p, prered=%x, expected %x\n",
				hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred != SMALLOC_POST_RED) {
		log_err("smalloc post redzone destroyed!\n"
			" ptr=%p, postred=%x, expected %x\n",
				hdr, *postred, SMALLOC_POST_RED);
		assert(0);
	}
}
#else
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif

static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;
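
	/*
	 * Example of the mapping above: a block-aligned offset of 4992
	 * gives i = 4992 / 1024 = 4 (bitmap word) and
	 * idx = (4992 % 1024) / 32 = 28 (bit within that word), the
	 * inverse of how __smalloc_pool() builds the offset further down.
	 */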

	pool_lock(pool);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	if (i < pool->next_non_full)
		pool->next_non_full = i;
	pool->free_blocks += size_to_blocks(hdr->size);
	pool_unlock(pool);
}

void sfree(void *ptr)
{
	struct pool *pool = NULL;
	unsigned int i;

	if (!ptr)
		return;

	global_read_lock();

	for (i = 0; i < nr_pools; i++) {
		if (ptr_valid(&mp[i], ptr)) {
			pool = &mp[i];
			break;
		}
	}

	global_read_unlock();

	if (pool) {
		sfree_pool(pool, ptr);
		return;
	}

	log_err("smalloc: ptr %p not from smalloc pool\n", ptr);
}

static void *__smalloc_pool(struct pool *pool, size_t size)
{
	size_t nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	pool_lock(pool);

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)
		goto fail;

	i = pool->next_non_full;
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		if (pool->bitmap[i] == -1U) {
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	pool_unlock(pool);
	return ret;
}

static void *smalloc_pool(struct pool *pool, size_t size)
{
	size_t alloc_size = size + sizeof(struct block_hdr);
	void *ptr;

	/*
	 * Round to int alignment, so that the postred pointer will
	 * be naturally aligned as well.
	 */
#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
	alloc_size = (alloc_size + int_mask) & ~int_mask;
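
	/*
	 * Alignment example (with a 4-byte int, int_mask == 3): an
	 * alloc_size of 21 at this point becomes (21 + 4 + 3) & ~3 = 28
	 * once the extra postred word has been accounted for.
	 */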
#endif

	ptr = __smalloc_pool(pool, alloc_size);
	if (ptr) {
		struct block_hdr *hdr = ptr;

		hdr->size = alloc_size;
		ptr += sizeof(*hdr);

		fill_redzone(hdr);

		memset(ptr, 0, size);
	}

	return ptr;
}

void *smalloc(size_t size)
{
	unsigned int i, end_pool;

	if (size != (unsigned int) size)
		return NULL;

	global_write_lock();
	i = last_pool;
	end_pool = nr_pools;

	do {
		for (; i < end_pool; i++) {
			void *ptr = smalloc_pool(&mp[i], size);

			if (ptr) {
				last_pool = i;
				global_write_unlock();
				return ptr;
			}
		}
		if (last_pool) {
			end_pool = last_pool;
			last_pool = i = 0;
			continue;
		}

		break;
	} while (1);

	global_write_unlock();
	return NULL;
}

void *scalloc(size_t nmemb, size_t size)
{
	return smalloc(nmemb * size);
}
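
/*
 * Note: the nmemb * size product above is not checked for overflow, so
 * callers are expected to pass values whose product fits in size_t.
 */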

char *smalloc_strdup(const char *str)
{
	char *ptr;

	ptr = smalloc(strlen(str) + 1);
	if (ptr)
		strcpy(ptr, str);
	return ptr;
}