X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=smalloc.c;h=3dd1b4960dd9f15186399432800412fa1c45255d;hp=0b9abad301d56ba8cb4ab7b1fb077a149c25aa6b;hb=5e012980ff5f8396a3e3ebc432e1dd32cebedaa1;hpb=d24c33a479fcd68debad128da057814495f65e20

diff --git a/smalloc.c b/smalloc.c
index 0b9abad3..3dd1b496 100644
--- a/smalloc.c
+++ b/smalloc.c
@@ -12,28 +12,39 @@
 #include <limits.h>
 
 #include "mutex.h"
+#include "arch/arch.h"
 
-#undef ENABLE_RESIZE		/* define to enable pool resizing */
-#define MP_SAFE			/* define to made allocator thread safe */
+#define SMALLOC_REDZONE		/* define to detect memory corruption */
 
-#define INITIAL_SIZE	65536	/* new pool size */
-#define MAX_POOLS	32	/* maximum number of pools to setup */
+#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
+#define SMALLOC_BPI	(sizeof(unsigned int) * 8)
+#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)
 
-#ifdef ENABLE_RESIZE
-#define MAX_SIZE	8 * INITIAL_SIZE
-static unsigned int resize_error;
-#endif
+#define INITIAL_SIZE	8192*1024	/* new pool size */
+#define MAX_POOLS	128		/* maximum number of pools to setup */
+
+#define SMALLOC_PRE_RED		0xdeadbeefU
+#define SMALLOC_POST_RED	0x5aa55aa5U
+
+unsigned int smalloc_pool_size = INITIAL_SIZE;
+const int int_mask = sizeof(int) - 1;
 
 struct pool {
 	struct fio_mutex *lock;			/* protects this pool */
 	void *map;				/* map of blocks */
-	void *last;				/* next free block hint */
-	unsigned int size;			/* size of pool */
-	unsigned int room;			/* size left in pool */
-	unsigned int largest_block;		/* largest block free */
-	unsigned int free_since_compact;	/* sfree() since compact() */
+	unsigned int *bitmap;			/* blocks free/busy map */
+	unsigned int free_blocks;		/* free blocks */
+	unsigned int nr_blocks;			/* total blocks */
+	unsigned int next_non_full;
 	int fd;					/* memory backing fd */
-	char file[PATH_MAX];			/* filename for fd */
+	unsigned int mmap_size;
+};
+
+struct block_hdr {
+	unsigned int size;
+#ifdef SMALLOC_REDZONE
+	unsigned int prered;
+#endif
 };
 
 static struct pool mp[MAX_POOLS];
@@ -41,232 +52,206 @@ static unsigned int nr_pools;
 static unsigned int last_pool;
 static struct fio_mutex *lock;
 
-struct mem_hdr {
-	unsigned int size;
-};
-
 static inline void pool_lock(struct pool *pool)
 {
-	if (pool->lock)
-		fio_mutex_down(pool->lock);
+	fio_mutex_down(pool->lock);
 }
 
 static inline void pool_unlock(struct pool *pool)
 {
-	if (pool->lock)
-		fio_mutex_up(pool->lock);
+	fio_mutex_up(pool->lock);
 }
 
-static inline void global_lock(void)
+static inline void global_read_lock(void)
 {
-	if (lock)
-		fio_mutex_down(lock);
+	fio_mutex_down_read(lock);
 }
 
-static inline void global_unlock(void)
+static inline void global_read_unlock(void)
 {
-	if (lock)
-		fio_mutex_up(lock);
+	fio_mutex_up_read(lock);
 }
 
-#define hdr_free(hdr)		((hdr)->size & 0x80000000)
-#define hdr_size(hdr)		((hdr)->size & ~0x80000000)
-#define hdr_mark_free(hdr)	((hdr)->size |= 0x80000000)
-
-static inline int ptr_valid(struct pool *pool, void *ptr)
+static inline void global_write_lock(void)
 {
-	return (ptr >= pool->map) && (ptr < pool->map + pool->size);
+	fio_mutex_down_write(lock);
 }
 
-static inline int __hdr_valid(struct pool *pool, struct mem_hdr *hdr,
-			      unsigned int size)
+static inline void global_write_unlock(void)
 {
-	return ptr_valid(pool, hdr) && ptr_valid(pool, (void *) hdr + size - 1);
+	fio_mutex_up_write(lock);
 }
 
-static inline int hdr_valid(struct pool *pool, struct mem_hdr *hdr)
+static inline int ptr_valid(struct pool *pool, void *ptr)
 {
-	return __hdr_valid(pool, hdr, hdr_size(hdr));
+	unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;
+
+	return (ptr >= pool->map) && (ptr < pool->map + pool_size);
 }
 
-static inline int region_free(struct mem_hdr *hdr)
+static inline unsigned int size_to_blocks(unsigned int size)
 {
-	return hdr_free(hdr) || (!hdr_free(hdr) && !hdr_size(hdr));
+	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
 }
 
-static inline struct mem_hdr *__hdr_nxt(struct pool *pool, struct mem_hdr *hdr,
-					unsigned int size)
+static int blocks_iter(struct pool *pool, unsigned int pool_idx,
+		       unsigned int idx, unsigned int nr_blocks,
+		       int (*func)(unsigned int *map, unsigned int mask))
 {
-	struct mem_hdr *nxt = (void *) hdr + size + sizeof(*hdr);
-	if (__hdr_valid(pool, nxt, size))
-		return nxt;
+	while (nr_blocks) {
+		unsigned int this_blocks, mask;
+		unsigned int *map;
 
-	return NULL;
-}
+		if (pool_idx >= pool->nr_blocks)
+			return 0;
 
-static inline struct mem_hdr *hdr_nxt(struct pool *pool, struct mem_hdr *hdr)
-{
-	return __hdr_nxt(pool, hdr, hdr_size(hdr));
-}
+		map = &pool->bitmap[pool_idx];
 
-static void merge(struct pool *pool, struct mem_hdr *hdr, struct mem_hdr *nxt)
-{
-	unsigned int hfree = hdr_free(hdr);
-	unsigned int nfree = hdr_free(nxt);
-
-	hdr->size = hdr_size(hdr) + hdr_size(nxt) + sizeof(*nxt);
-	nxt->size = 0;
+		this_blocks = nr_blocks;
+		if (this_blocks + idx > SMALLOC_BPI) {
+			this_blocks = SMALLOC_BPI - idx;
+			idx = SMALLOC_BPI - this_blocks;
+		}
 
-	if (hfree)
-		hdr_mark_free(hdr);
-	if (nfree)
-		hdr_mark_free(nxt);
+		if (this_blocks == SMALLOC_BPI)
+			mask = -1U;
+		else
+			mask = ((1U << this_blocks) - 1) << idx;
 
-	if (pool->last == nxt)
-		pool->last = hdr;
-}
+		if (!func(map, mask))
+			return 0;
 
-static int combine(struct pool *pool, struct mem_hdr *prv, struct mem_hdr *hdr)
-{
-	if (prv && hdr_free(prv) && hdr_free(hdr)) {
-		merge(pool, prv, hdr);
-		return 1;
+		nr_blocks -= this_blocks;
+		idx = 0;
+		pool_idx++;
 	}
 
-	return 0;
+	return 1;
 }
 
-static int compact_pool(struct pool *pool)
+static int mask_cmp(unsigned int *map, unsigned int mask)
 {
-	struct mem_hdr *hdr = pool->map, *nxt;
-	unsigned int compacted = 0;
-
-	if (pool->free_since_compact < 50)
-		return 1;
-
-	while (hdr) {
-		nxt = hdr_nxt(pool, hdr);
-		if (!nxt)
-			break;
-		if (hdr_free(nxt) && hdr_free(hdr)) {
-			merge(pool, hdr, nxt);
-			compacted++;
-			continue;
-		}
-		hdr = hdr_nxt(pool, hdr);
-	}
-
-	pool->free_since_compact = 0;
-	return !!compacted;
+	return !(*map & mask);
 }
 
-static int resize_pool(struct pool *pool)
+static int mask_clear(unsigned int *map, unsigned int mask)
 {
-#ifdef ENABLE_RESIZE
-	unsigned int new_size = pool->size << 1;
-	struct mem_hdr *hdr, *last_hdr;
-	void *ptr;
+	assert((*map & mask) == mask);
+	*map &= ~mask;
+	return 1;
+}
 
-	if (new_size >= MAX_SIZE || resize_error)
-		return 1;
+static int mask_set(unsigned int *map, unsigned int mask)
+{
+	assert(!(*map & mask));
+	*map |= mask;
+	return 1;
+}
 
-	if (ftruncate(pool->fd, new_size) < 0)
-		goto fail;
+static int blocks_free(struct pool *pool, unsigned int pool_idx,
+		       unsigned int idx, unsigned int nr_blocks)
+{
+	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
+}
 
-	ptr = mremap(pool->map, pool->size, new_size, 0);
-	if (ptr == MAP_FAILED)
-		goto fail;
+static void set_blocks(struct pool *pool, unsigned int pool_idx,
+		       unsigned int idx, unsigned int nr_blocks)
+{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
+}
 
-	pool->map = ptr;
-	hdr = pool;
-	do {
-		last_hdr = hdr;
-	} while ((hdr = hdr_nxt(hdr)) != NULL);
-
-	if (hdr_free(last_hdr)) {
-		last_hdr->size = hdr_size(last_hdr) + new_size - pool_size;
-		hdr_mark_free(last_hdr);
-	} else {
-		struct mem_hdr *nxt;
-
-		nxt = (void *) last_hdr + hdr_size(last_hdr) + sizeof(*hdr);
-		nxt->size = new_size - pool_size - sizeof(*hdr);
-		hdr_mark_free(nxt);
-	}
+static void clear_blocks(struct pool *pool, unsigned int pool_idx,
+			 unsigned int idx, unsigned int nr_blocks)
+{
+	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
+}
 
-	pool_room += new_size - pool_size;
-	pool_size = new_size;
-	return 0;
-fail:
-	perror("resize");
-	resize_error = 1;
-#else
-	return 1;
-#endif
+static int find_next_zero(int word, int start)
+{
+	assert(word != -1U);
+	word >>= (start + 1);
+	return ffz(word) + start + 1;
 }
 
-static int add_pool(struct pool *pool)
+static int add_pool(struct pool *pool, unsigned int alloc_size)
 {
-	struct mem_hdr *hdr;
+	int fd, bitmap_blocks;
+	char file[] = "/tmp/.fio_smalloc.XXXXXX";
 	void *ptr;
-	int fd;
 
-	strcpy(pool->file, "/tmp/.fio_smalloc.XXXXXX");
-	fd = mkstemp(pool->file);
+	fd = mkstemp(file);
 	if (fd < 0)
 		goto out_close;
 
-	pool->size = INITIAL_SIZE;
-	if (ftruncate(fd, pool->size) < 0)
+#ifdef SMALLOC_REDZONE
+	alloc_size += sizeof(unsigned int);
+#endif
+	alloc_size += sizeof(struct block_hdr);
+	if (alloc_size < INITIAL_SIZE)
+		alloc_size = INITIAL_SIZE;
+
+	/* round up to nearest full number of blocks */
+	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
+	bitmap_blocks = alloc_size / SMALLOC_BPL;
+	alloc_size += bitmap_blocks * sizeof(unsigned int);
+	pool->mmap_size = alloc_size;
+
+	pool->nr_blocks = bitmap_blocks;
+	pool->free_blocks = bitmap_blocks * SMALLOC_BPB;
+
+	if (ftruncate(fd, alloc_size) < 0)
 		goto out_unlink;
 
-	ptr = mmap(NULL, pool->size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 	if (ptr == MAP_FAILED)
 		goto out_unlink;
 
-	memset(ptr, 0, pool->size);
-	pool->map = pool->last = ptr;
+	memset(ptr, 0, alloc_size);
+	pool->map = ptr;
+	pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);
 
-#ifdef MP_SAFE
 	pool->lock = fio_mutex_init(1);
 	if (!pool->lock)
 		goto out_unlink;
-#endif
 
+	/*
+	 * Unlink pool file now. It wont get deleted until the fd is closed,
+	 * which happens both for cleanup or unexpected quit. This way we
+	 * don't leave temp files around in case of a crash.
+	 */
+	unlink(file);
 	pool->fd = fd;
 
-	hdr = pool->map;
-	pool->room = hdr->size = pool->size - sizeof(*hdr);
-	pool->largest_block = pool->room;
-	hdr_mark_free(hdr);
 	nr_pools++;
 	return 0;
 out_unlink:
+	fprintf(stderr, "smalloc: failed adding pool\n");
 	if (pool->map)
-		munmap(pool->map, pool->size);
-	unlink(pool->file);
+		munmap(pool->map, pool->mmap_size);
+	unlink(file);
 out_close:
-	if (fd >= 0)
-		close(fd);
+	close(fd);
 	return 1;
 }
 
 void sinit(void)
 {
-	int ret = add_pool(&mp[0]);
+	int ret;
 
-#ifdef MP_SAFE
-	lock = fio_mutex_init(1);
-#endif
+	lock = fio_mutex_rw_init();
+	ret = add_pool(&mp[0], INITIAL_SIZE);
 	assert(!ret);
 }
 
 static void cleanup_pool(struct pool *pool)
 {
-	unlink(pool->file);
+	/*
+	 * This will also remove the temporary file we used as a backing
+	 * store, it was already unlinked
+	 */
 	close(pool->fd);
-	munmap(pool->map, pool->size);
+	munmap(pool->map, pool->mmap_size);
 
 	if (pool->lock)
 		fio_mutex_remove(pool->lock);
@@ -283,29 +268,77 @@ void scleanup(void)
 		fio_mutex_remove(lock);
 }
 
+#ifdef SMALLOC_REDZONE
+static void *postred_ptr(struct block_hdr *hdr)
+{
+	unsigned long ptr;
+
+	ptr = (unsigned long) hdr + hdr->size - sizeof(unsigned int);
+	ptr = (ptr + int_mask) & ~int_mask;
+
+	return (void *) ptr;
+}
+
+static void fill_redzone(struct block_hdr *hdr)
+{
+	unsigned int *postred = postred_ptr(hdr);
+
+	hdr->prered = SMALLOC_PRE_RED;
+	*postred = SMALLOC_POST_RED;
+}
+
+static void sfree_check_redzone(struct block_hdr *hdr)
+{
+	unsigned int *postred = postred_ptr(hdr);
+
+	if (hdr->prered != SMALLOC_PRE_RED) {
+		fprintf(stderr, "smalloc pre redzone destroyed!\n");
+		fprintf(stderr, "  ptr=%p, prered=%x, expected %x\n",
+				hdr, hdr->prered, SMALLOC_PRE_RED);
+		assert(0);
+	}
+	if (*postred != SMALLOC_POST_RED) {
+		fprintf(stderr, "smalloc post redzone destroyed!\n");
+		fprintf(stderr, "  ptr=%p, postred=%x, expected %x\n",
+				hdr, *postred, SMALLOC_POST_RED);
+		assert(0);
+	}
+}
+#else
+static void fill_redzone(struct block_hdr *hdr)
+{
+}
+
+static void sfree_check_redzone(struct block_hdr *hdr)
+{
+}
+#endif
+
 static void sfree_pool(struct pool *pool, void *ptr)
 {
-	struct mem_hdr *hdr, *nxt;
+	struct block_hdr *hdr;
+	unsigned int i, idx;
+	unsigned long offset;
 
 	if (!ptr)
 		return;
 
-	assert(ptr_valid(pool, ptr));
+	ptr -= sizeof(*hdr);
+	hdr = ptr;
 
-	pool_lock(pool);
-	hdr = ptr - sizeof(*hdr);
-	assert(!hdr_free(hdr));
-	hdr_mark_free(hdr);
-	pool->room -= hdr_size(hdr);
+	assert(ptr_valid(pool, ptr));
 
-	nxt = hdr_nxt(pool, hdr);
-	if (nxt && hdr_free(nxt))
-		merge(pool, hdr, nxt);
+	sfree_check_redzone(hdr);
 
-	if (hdr_size(hdr) > pool->largest_block)
-		pool->largest_block = hdr_size(hdr);
+	offset = ptr - pool->map;
+	i = offset / SMALLOC_BPL;
+	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;
 
-	pool->free_since_compact++;
+	pool_lock(pool);
+	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
+	if (i < pool->next_non_full)
+		pool->next_non_full = i;
+	pool->free_blocks += size_to_blocks(hdr->size);
 	pool_unlock(pool);
 }
 
@@ -314,7 +347,10 @@ void sfree(void *ptr)
 	struct pool *pool = NULL;
 	unsigned int i;
 
-	global_lock();
+	if (!ptr)
+		return;
+
+	global_read_lock();
 
 	for (i = 0; i < nr_pools; i++) {
 		if (ptr_valid(&mp[i], ptr)) {
@@ -323,102 +359,100 @@
 		}
 	}
 
-	global_unlock();
+	global_read_unlock();
 
 	assert(pool);
 	sfree_pool(pool, ptr);
 }
 
-static void *smalloc_pool(struct pool *pool, unsigned int size)
+static void *__smalloc_pool(struct pool *pool, unsigned int size)
 {
-	struct mem_hdr *hdr, *prv;
-	int did_restart = 0;
-	void *ret;
-
-	/*
-	 * slight chance of race with sfree() here, but acceptable
-	 */
-	if (!size || size > pool->room + sizeof(*hdr) ||
-	    ((size > pool->largest_block) && pool->largest_block))
-		return NULL;
+	unsigned int nr_blocks;
+	unsigned int i;
+	unsigned int offset;
+	unsigned int last_idx;
+	void *ret = NULL;
 
 	pool_lock(pool);
-restart:
-	hdr = pool->last;
-	prv = NULL;
-	do {
-		if (combine(pool, prv, hdr))
-			hdr = prv;
-
-		if (hdr_free(hdr) && hdr_size(hdr) >= size)
-			break;
-		prv = hdr;
-	} while ((hdr = hdr_nxt(pool, hdr)) != NULL);
-
-	if (!hdr)
+	nr_blocks = size_to_blocks(size);
+	if (nr_blocks > pool->free_blocks)
 		goto fail;
 
-	/*
-	 * more room, adjust next header if any
-	 */
-	if (hdr_size(hdr) - size >= 2 * sizeof(*hdr)) {
-		struct mem_hdr *nxt = __hdr_nxt(pool, hdr, size);
-
-		if (nxt) {
-			nxt->size = hdr_size(hdr) - size - sizeof(*hdr);
-			if (hdr_size(hdr) == pool->largest_block)
-				pool->largest_block = hdr_size(nxt);
-			hdr_mark_free(nxt);
-		} else
-			size = hdr_size(hdr);
-	} else
-		size = hdr_size(hdr);
-
-	if (size == hdr_size(hdr) && size == pool->largest_block)
-		pool->largest_block = 0;
+	i = pool->next_non_full;
+	last_idx = 0;
+	offset = -1U;
+	while (i < pool->nr_blocks) {
+		unsigned int idx;
 
-	/*
-	 * also clears free bit
-	 */
-	hdr->size = size;
-	pool->last = hdr_nxt(pool, hdr);
-	if (!pool->last)
-		pool->last = pool->map;
-	pool->room -= size;
-	pool_unlock(pool);
+		if (pool->bitmap[i] == -1U) {
+			i++;
+			pool->next_non_full = i;
+			last_idx = 0;
+			continue;
+		}
 
-	ret = (void *) hdr + sizeof(*hdr);
-	memset(ret, 0, size);
-	return ret;
-fail:
-	/*
-	 * if we fail to allocate, first compact the entries that we missed.
-	 * if that also fails, increase the size of the pool
-	 */
-	++did_restart;
-	if (did_restart <= 1) {
-		if (!compact_pool(pool)) {
-			pool->last = pool->map;
-			goto restart;
+		idx = find_next_zero(pool->bitmap[i], last_idx);
+		if (!blocks_free(pool, i, idx, nr_blocks)) {
+			idx += nr_blocks;
+			if (idx < SMALLOC_BPI)
+				last_idx = idx;
+			else {
+				last_idx = 0;
+				while (idx >= SMALLOC_BPI) {
+					i++;
+					idx -= SMALLOC_BPI;
+				}
+			}
+			continue;
 		}
+		set_blocks(pool, i, idx, nr_blocks);
+		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
+		break;
 	}
-	++did_restart;
-	if (did_restart <= 2) {
-		if (!resize_pool(pool)) {
-			pool->last = pool->map;
-			goto restart;
-		}
+
+	if (i < pool->nr_blocks) {
+		pool->free_blocks -= nr_blocks;
+		ret = pool->map + offset;
 	}
+fail:
 	pool_unlock(pool);
-	return NULL;
+	return ret;
+}
+
+static void *smalloc_pool(struct pool *pool, unsigned int size)
+{
+	unsigned int alloc_size = size + sizeof(struct block_hdr);
+	void *ptr;
+
+	/*
+	 * Round to int alignment, so that the postred pointer will
+	 * be naturally aligned as well.
+	 */
+#ifdef SMALLOC_REDZONE
+	alloc_size += sizeof(unsigned int);
+	alloc_size = (alloc_size + int_mask) & ~int_mask;
+#endif
+
+	ptr = __smalloc_pool(pool, alloc_size);
+	if (ptr) {
+		struct block_hdr *hdr = ptr;
+
+		hdr->size = alloc_size;
+		fill_redzone(hdr);
+
+		ptr += sizeof(*hdr);
+		memset(ptr, 0, size);
+	}
+
+	return ptr;
 }
 
 void *smalloc(unsigned int size)
 {
 	unsigned int i;
 
-	global_lock();
+	global_write_lock();
 
 	i = last_pool;
 	do {
@@ -427,7 +461,7 @@ void *smalloc(unsigned int size)
 
 			if (ptr) {
 				last_pool = i;
-				global_unlock();
+				global_write_unlock();
 				return ptr;
 			}
 		}
@@ -436,16 +470,17 @@ void *smalloc(unsigned int size)
 			continue;
 		}
 
-		if (nr_pools + 1 >= MAX_POOLS)
+		if (nr_pools + 1 > MAX_POOLS)
 			break;
 		else {
 			i = nr_pools;
-			if (add_pool(&mp[nr_pools]))
-				break;
+			if (add_pool(&mp[nr_pools], size))
+				goto out;
 		}
 	} while (1);
 
-	global_unlock();
+out:
+	global_write_unlock();
 	return NULL;
 }
 
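Two standalone C sketches follow. They are editorial illustration only, not part of the patch above; apart from the constants, struct fields and function names quoted from the new smalloc.c, every helper and value in them is hypothetical.

The first sketch mirrors the bitmap bookkeeping the patch introduces: one bit covers SMALLOC_BPB bytes, one unsigned int of the bitmap covers SMALLOC_BPL bytes, a request is rounded up to whole blocks as size_to_blocks() does, a mask for a run of blocks inside one word is built the same way blocks_iter() builds it, and a pool offset is mapped back to a (word, bit) pair the way sfree_pool() does.

/* sketch only -- assumes the SMALLOC_* constants from the new smalloc.c */
#include <assert.h>
#include <stdio.h>

#define SMALLOC_BPB	32				/* bytes covered by one bitmap bit */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)	/* bits per bitmap word */
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)	/* bytes covered by one bitmap word */

/* round a request up to whole blocks, as size_to_blocks() does */
static unsigned int size_to_blocks(unsigned int size)
{
	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
}

/* mask covering nr_blocks bits starting at bit idx of one bitmap word */
static unsigned int block_mask(unsigned int idx, unsigned int nr_blocks)
{
	assert(idx + nr_blocks <= SMALLOC_BPI);
	if (nr_blocks == SMALLOC_BPI)
		return -1U;
	return ((1U << nr_blocks) - 1) << idx;
}

int main(void)
{
	unsigned int size = 100;			/* hypothetical request */
	unsigned int nr_blocks = size_to_blocks(size);	/* -> 4 blocks of 32 bytes */
	unsigned long offset = 3 * SMALLOC_BPL + 5 * SMALLOC_BPB;

	/* map a pool offset back to (bitmap word, bit), as sfree_pool() does */
	unsigned int i = offset / SMALLOC_BPL;
	unsigned int idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	printf("%u bytes -> %u blocks; word %u, bit %u, mask %#x\n",
	       size, nr_blocks, i, idx, block_mask(idx, nr_blocks));
	return 0;
}

The second sketch lays out a block the way smalloc_pool() does when SMALLOC_REDZONE is defined: a block_hdr carrying the total size and the leading canary, the user area, and a trailing canary placed in the last int-aligned word of the allocation, located the same way postred_ptr() locates it. Here plain calloc() stands in for the mmap-backed pool.

/* sketch only -- calloc() stands in for the shared pool */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U

static const int int_mask = sizeof(int) - 1;

struct block_hdr {
	unsigned int size;	/* whole allocation: header + data + postred */
	unsigned int prered;	/* leading canary */
};

/* trailing canary lives in the last int-aligned word of the block */
static unsigned int *postred_ptr(struct block_hdr *hdr)
{
	unsigned long ptr = (unsigned long) hdr + hdr->size - sizeof(unsigned int);

	ptr = (ptr + int_mask) & ~int_mask;
	return (unsigned int *) ptr;
}

int main(void)
{
	unsigned int user_size = 100;
	unsigned int alloc_size = user_size + sizeof(struct block_hdr) +
					sizeof(unsigned int);
	struct block_hdr *hdr;

	alloc_size = (alloc_size + int_mask) & ~int_mask;
	hdr = calloc(1, alloc_size);
	if (!hdr)
		return 1;
	hdr->size = alloc_size;
	hdr->prered = SMALLOC_PRE_RED;
	*postred_ptr(hdr) = SMALLOC_POST_RED;

	/* an in-bounds write leaves both canaries intact */
	memset(hdr + 1, 0xab, user_size);
	assert(hdr->prered == SMALLOC_PRE_RED);
	assert(*postred_ptr(hdr) == SMALLOC_POST_RED);

	free(hdr);
	return 0;
}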