2 * simple memory allocator, backed by mmap() so that it hands out memory
3 * that can be shared across processes and threads
11 #include <sys/types.h>
16 #undef ENABLE_RESIZE /* define to enable pool resizing */
17 #define MP_SAFE /* define to made allocator thread safe */
19 #define INITIAL_SIZE 1048576 /* new pool size */
20 #define MAX_POOLS 32 /* maximum number of pools to setup */
22 unsigned int smalloc_pool_size = INITIAL_SIZE;
25 #define MAX_SIZE (8 * smalloc_pool_size) /* resize_pool() growth cap; parenthesized so the expansion cannot be broken by operator precedence at use sites */
26 static unsigned int resize_error;
/*
 * Per-pool bookkeeping (struct pool; the 'struct pool {' line and the
 * closing brace are not visible in this chunk).
 */
30 struct fio_mutex *lock; /* protects this pool */
31 void *map; /* map of blocks */
32 void *last; /* next free block hint */
33 unsigned int size; /* size of pool */
34 unsigned int room; /* size left in pool */
35 unsigned int largest_block; /* largest block free */
36 unsigned int free_since_compact; /* sfree() since compact() */
37 int fd; /* memory backing fd */
38 char file[PATH_MAX]; /* filename for fd */
/* all pools, how many are in use, and the index last allocated from */
41 static struct pool mp[MAX_POOLS];
42 static unsigned int nr_pools;
43 static unsigned int last_pool;
/* global rwlock guarding the pool table itself (see global_*_lock below) */
44 static struct fio_mutex *lock;
/* take one pool's mutex (body lines between header and call — likely an
 * MP_SAFE guard and braces — are not visible in this chunk) */
50 static inline void pool_lock(struct pool *pool)
53 fio_mutex_down(pool->lock);
/* release one pool's mutex (guard/brace lines not visible in this chunk) */
56 static inline void pool_unlock(struct pool *pool)
59 fio_mutex_up(pool->lock);
/* take the global pool-table rwlock for reading (guard/brace lines not
 * visible in this chunk) */
62 static inline void global_read_lock(void)
65 fio_mutex_down_read(lock);
/* drop the global pool-table read lock (guard/brace lines not visible
 * in this chunk) */
68 static inline void global_read_unlock(void)
71 fio_mutex_up_read(lock);
/* take the global pool-table rwlock for writing — used when adding a
 * pool (guard/brace lines not visible in this chunk) */
74 static inline void global_write_lock(void)
77 fio_mutex_down_write(lock);
/* drop the global pool-table write lock (guard/brace lines not visible
 * in this chunk) */
80 static inline void global_write_unlock(void)
83 fio_mutex_up_write(lock);
/* the top bit of mem_hdr->size doubles as the "region is free" flag;
 * hdr_size() masks it off to recover the payload size */
86 #define hdr_free(hdr) ((hdr)->size & 0x80000000)
87 #define hdr_size(hdr) ((hdr)->size & ~0x80000000)
88 #define hdr_mark_free(hdr) ((hdr)->size |= 0x80000000)
/*
 * Non-zero iff ptr lies inside this pool's mapped region,
 * i.e. within [pool->map, pool->map + pool->size).
 */
90 static inline int ptr_valid(struct pool *pool, void *ptr)
92 return (ptr >= pool->map) && (ptr < pool->map + pool->size);
/*
 * Non-zero iff the header and 'size' bytes following it fall entirely
 * inside the pool (the continuation line of the parameter list, carrying
 * 'size', is not visible in this chunk).
 */
95 static inline int __hdr_valid(struct pool *pool, struct mem_hdr *hdr,
98 return ptr_valid(pool, hdr) && ptr_valid(pool, (void *) hdr + size - 1);
/* convenience wrapper: validate a header against its own recorded size */
101 static inline int hdr_valid(struct pool *pool, struct mem_hdr *hdr)
103 return __hdr_valid(pool, hdr, hdr_size(hdr));
/*
 * A region counts as free if its free bit is set, or if it is a
 * zero-sized stub (size 0, never handed out).
 */
106 static inline int region_free(struct mem_hdr *hdr)
108 return hdr_free(hdr) || (!hdr_free(hdr) && !hdr_size(hdr));
/*
 * Step to the header that follows 'hdr' given a payload of 'size' bytes;
 * the return lines (nxt, or NULL when past the pool end) are not visible
 * in this chunk.
 * NOTE(review): nxt is validated with the *current* region's 'size'
 * rather than nxt's own extent — confirm intent against the full source.
 */
111 static inline struct mem_hdr *__hdr_nxt(struct pool *pool, struct mem_hdr *hdr,
114 struct mem_hdr *nxt = (void *) hdr + size + sizeof(*hdr);
116 if (__hdr_valid(pool, nxt, size))
/* step to the next header using hdr's own recorded size */
122 static inline struct mem_hdr *hdr_nxt(struct pool *pool, struct mem_hdr *hdr)
124 return __hdr_nxt(pool, hdr, hdr_size(hdr));
/*
 * Coalesce two adjacent regions: fold nxt's payload and its header into
 * hdr. hfree/nfree capture the free bits before the size update (the
 * lines that use them, e.g. to restore the free bit, are not visible in
 * this chunk).
 */
127 static void merge(struct pool *pool, struct mem_hdr *hdr, struct mem_hdr *nxt)
129 unsigned int hfree = hdr_free(hdr);
130 unsigned int nfree = hdr_free(nxt);
/* new size spans both payloads plus the header swallowed from nxt;
 * note the plain assignment also clears hdr's free bit */
132 hdr->size = hdr_size(hdr) + hdr_size(nxt) + sizeof(*nxt);
/* if the allocation hint pointed at the swallowed header, it must be
 * redirected (the target line is not visible here) */
140 if (pool->last == nxt)
/*
 * Merge prv into its neighbour hdr when both are free; returns whether a
 * merge happened (the return lines are not visible in this chunk).
 */
144 static int combine(struct pool *pool, struct mem_hdr *prv, struct mem_hdr *hdr)
146 if (prv && hdr_free(prv) && hdr_free(hdr)) {
147 merge(pool, prv, hdr);
/*
 * Walk the whole pool merging adjacent free regions. Rate-limited: a
 * no-op until at least 50 frees have accumulated since the last compact
 * (free_since_compact). Loop structure and return lines are only
 * partially visible in this chunk; 'compacted' presumably counts merges.
 */
154 static int compact_pool(struct pool *pool)
156 struct mem_hdr *hdr = pool->map, *nxt;
157 unsigned int compacted = 0;
159 if (pool->free_since_compact < 50)
163 nxt = hdr_nxt(pool, hdr);
/* two free neighbours: fold nxt into hdr and re-examine from hdr */
166 if (hdr_free(nxt) && hdr_free(hdr)) {
167 merge(pool, hdr, nxt);
/* no merge: advance to the next region (branch layout partially hidden) */
171 hdr = hdr_nxt(pool, hdr);
174 pool->free_since_compact = 0;
/*
 * Double the pool: grow the backing file with ftruncate(), then grow the
 * mapping in place with mremap() (flags == 0, so the mapping may not
 * move). The new tail bytes either extend a trailing free region or get
 * a fresh free header. Only relevant when ENABLE_RESIZE is defined (it
 * is #undef'd at the top of this file). Several lines (declarations of
 * ptr/nxt, loop setup, error paths, return) are not visible in this chunk.
 */
178 static int resize_pool(struct pool *pool)
181 unsigned int new_size = pool->size << 1;
182 struct mem_hdr *hdr, *last_hdr;
/* refuse to grow past MAX_SIZE, or after a previous resize failure */
185 if (new_size >= MAX_SIZE || resize_error)
188 if (ftruncate(pool->fd, new_size) < 0)
191 ptr = mremap(pool->map, pool->size, new_size, 0);
192 if (ptr == MAP_FAILED)
/* NOTE(review): hdr_nxt() is declared as hdr_nxt(pool, hdr) elsewhere in
 * this file — this one-argument call looks wrong; confirm against the
 * complete source. */
199 } while ((hdr = hdr_nxt(hdr)) != NULL);
/* trailing region already free: just extend it by the added bytes
 * (the size assignment clears the free bit, hence the re-mark) */
201 if (hdr_free(last_hdr)) {
202 last_hdr->size = hdr_size(last_hdr) + new_size - pool_size;
203 hdr_mark_free(last_hdr);
/* otherwise append a fresh free header after the last region */
207 nxt = (void *) last_hdr + hdr_size(last_hdr) + sizeof(*hdr);
208 nxt->size = new_size - pool_size - sizeof(*hdr);
/* NOTE(review): pool_size / pool_room are not declared in the visible
 * lines — possibly macros for pool->size / pool->room; confirm. */
212 pool_room += new_size - pool_size;
213 pool_size = new_size;
/*
 * Set up one pool: a mkstemp() temp file truncated to smalloc_pool_size
 * and mmap'd MAP_SHARED, so memory handed out survives fork() and is
 * shared between processes. Error-path labels and some cleanup lines
 * are not visible in this chunk.
 */
223 static int add_pool(struct pool *pool)
/* template literal fits comfortably within file[PATH_MAX] */
229 strcpy(pool->file, "/tmp/.fio_smalloc.XXXXXX");
230 fd = mkstemp(pool->file);
234 pool->size = smalloc_pool_size;
235 if (ftruncate(fd, pool->size) < 0)
238 ptr = mmap(NULL, pool->size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
239 if (ptr == MAP_FAILED)
242 memset(ptr, 0, pool->size);
/* allocation scan hint starts at the base of the map */
243 pool->map = pool->last = ptr;
/* per-pool mutex, created unlocked (initial value 1) */
246 pool->lock = fio_mutex_init(1);
/* one initial region spanning the whole pool minus its header;
 * room therefore starts as the pool's total free space */
254 pool->room = hdr->size = pool->size - sizeof(*hdr);
255 pool->largest_block = pool->room;
259 global_write_unlock();
/* failure unwind: unmap the region (unlink/close lines not visible here) */
263 munmap(pool->map, pool->size);
/* fragment of the global initializer (the function header is not visible
 * in this chunk): create the global rwlock, then the first pool */
276 lock = fio_mutex_rw_init();
278 ret = add_pool(&mp[0]);
/*
 * Release one pool's resources: unmap the region and destroy its mutex
 * (unlink/close of the backing file, if present, is on lines not visible
 * in this chunk).
 */
282 static void cleanup_pool(struct pool *pool)
286 munmap(pool->map, pool->size);
289 fio_mutex_remove(pool->lock);
/* fragment of the global cleanup routine (the function header is not
 * visible in this chunk): tear down every pool, then the global rwlock */
296 for (i = 0; i < nr_pools; i++)
297 cleanup_pool(&mp[i]);
300 fio_mutex_remove(lock);
/*
 * Return a block to its pool: locate the header immediately before the
 * user pointer, mark it free (the marking line is not visible in this
 * chunk), try to coalesce with the following region, and update the
 * compaction/largest-block accounting. Locking lines are also hidden.
 */
303 static void sfree_pool(struct pool *pool, void *ptr)
305 struct mem_hdr *hdr, *nxt;
310 assert(ptr_valid(pool, ptr));
/* user pointer sits directly after its mem_hdr */
313 hdr = ptr - sizeof(*hdr);
/* double-free guard */
314 assert(!hdr_free(hdr));
/* NOTE(review): add_pool() initializes 'room' as the pool's free space,
 * so freeing should give bytes back — '-=' here shrinks it. Looks like
 * it should be '+='; confirm against the complete source. */
316 pool->room -= hdr_size(hdr);
/* coalesce with the next region if it is also free */
318 nxt = hdr_nxt(pool, hdr);
319 if (nxt && hdr_free(nxt))
320 merge(pool, hdr, nxt);
/* the freed (possibly merged) block may now be the largest free one */
322 if (hdr_size(hdr) > pool->largest_block)
323 pool->largest_block = hdr_size(hdr);
/* compact_pool() triggers once enough frees accumulate */
325 pool->free_since_compact++;
/*
 * Public free: under the global read lock, find which pool owns 'ptr',
 * then release it into that pool. NULL-pointer and not-found handling
 * lines are not visible in this chunk.
 */
329 void sfree(void *ptr)
331 struct pool *pool = NULL;
336 for (i = 0; i < nr_pools; i++) {
337 if (ptr_valid(&mp[i], ptr)) {
343 global_read_unlock();
346 sfree_pool(pool, ptr);
/*
 * Allocate 'size' bytes from one pool: first-fit scan from the 'last'
 * hint, opportunistically merging free neighbours along the way. On
 * failure, compact the pool and rescan; if that fails too, resize and
 * rescan. Returns zeroed memory, or NULL (control lines for loop setup,
 * restart labels, locking and returns are not visible in this chunk).
 */
349 static void *smalloc_pool(struct pool *pool, unsigned int size)
351 struct mem_hdr *hdr, *prv;
356 * slight chance of race with sfree() here, but acceptable
/* quick rejection: zero/oversized request, or request larger than the
 * largest known free block (largest_block == 0 means "unknown": scan) */
358 if (!size || size > pool->room + sizeof(*hdr) ||
359 ((size > pool->largest_block) && pool->largest_block))
/* merge adjacent free regions encountered during the scan */
367 if (combine(pool, prv, hdr))
/* first fit: take the first free region big enough */
370 if (hdr_free(hdr) && hdr_size(hdr) >= size)
374 } while ((hdr = hdr_nxt(pool, hdr)) != NULL);
380 * more room, adjust next header if any
/* split only when the remainder can hold a header plus some payload */
382 if (hdr_size(hdr) - size >= 2 * sizeof(*hdr)) {
383 struct mem_hdr *nxt = __hdr_nxt(pool, hdr, size);
386 nxt->size = hdr_size(hdr) - size - sizeof(*hdr);
/* splitting the largest block: the remainder becomes the new largest */
387 if (hdr_size(hdr) == pool->largest_block)
388 pool->largest_block = hdr_size(nxt);
391 size = hdr_size(hdr);
/* no split: hand out the whole region */
393 size = hdr_size(hdr);
/* consumed the largest block whole: force a future full rescan */
395 if (size == hdr_size(hdr) && size == pool->largest_block)
396 pool->largest_block = 0;
399 * also clears free bit
/* advance the first-fit hint past the block just handed out,
 * wrapping back to the pool base when at the end */
402 pool->last = hdr_nxt(pool, hdr);
404 pool->last = pool->map;
/* user memory starts right after the header; hand it back zeroed */
408 ret = (void *) hdr + sizeof(*hdr);
409 memset(ret, 0, size);
413 * if we fail to allocate, first compact the entries that we missed.
414 * if that also fails, increase the size of the pool
/* first retry: compact, reset the scan hint, restart the scan */
417 if (did_restart <= 1) {
418 if (!compact_pool(pool)) {
419 pool->last = pool->map;
/* second retry: grow the pool, reset the hint, restart the scan */
424 if (did_restart <= 2) {
425 if (!resize_pool(pool)) {
426 pool->last = pool->map;
434 void *smalloc(unsigned int size)
442 for (; i < nr_pools; i++) {
443 void *ptr = smalloc_pool(&mp[i], size);
447 global_read_unlock();
456 if (nr_pools + 1 >= MAX_POOLS)
460 global_read_unlock();
461 if (add_pool(&mp[nr_pools]))
467 global_read_unlock();
/*
 * strdup() replacement whose copy lives in shared smalloc memory; the
 * copy and return lines are not visible in this chunk.
 * NOTE(review): smalloc() can return NULL — ensure the hidden copy code
 * checks 'ptr' before writing into it.
 */
472 char *smalloc_strdup(const char *str)
476 ptr = smalloc(strlen(str) + 1);