X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=smalloc.c;h=6f647c060e087c198ce9b580cbef72ff03227847;hp=42008ebfdb6e44732c469b596b7be65786031231;hb=204c368ebe461b08b18124e1e5555a65b128ab7a;hpb=b67a1114b5cea9ef12d14f8821a0d8142998395f diff --git a/smalloc.c b/smalloc.c index 42008ebf..6f647c06 100644 --- a/smalloc.c +++ b/smalloc.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -15,6 +16,8 @@ #include "mutex.h" #include "arch/arch.h" #include "os/os.h" +#include "smalloc.h" +#include "log.h" #define SMALLOC_REDZONE /* define to detect memory corruption */ @@ -22,28 +25,29 @@ #define SMALLOC_BPI (sizeof(unsigned int) * 8) #define SMALLOC_BPL (SMALLOC_BPB * SMALLOC_BPI) -#define INITIAL_SIZE 8192*1024 /* new pool size */ -#define MAX_POOLS 128 /* maximum number of pools to setup */ +#define INITIAL_SIZE 16*1024*1024 /* new pool size */ +#define MAX_POOLS 8 /* maximum number of pools to setup */ #define SMALLOC_PRE_RED 0xdeadbeefU #define SMALLOC_POST_RED 0x5aa55aa5U unsigned int smalloc_pool_size = INITIAL_SIZE; -const int int_mask = sizeof(int) - 1; +#ifdef SMALLOC_REDZONE +static const int int_mask = sizeof(int) - 1; +#endif struct pool { struct fio_mutex *lock; /* protects this pool */ void *map; /* map of blocks */ unsigned int *bitmap; /* blocks free/busy map */ - unsigned int free_blocks; /* free blocks */ - unsigned int nr_blocks; /* total blocks */ - unsigned int next_non_full; - int fd; /* memory backing fd */ - unsigned int mmap_size; + size_t free_blocks; /* free blocks */ + size_t nr_blocks; /* total blocks */ + size_t next_non_full; + size_t mmap_size; }; struct block_hdr { - unsigned int size; + size_t size; #ifdef SMALLOC_REDZONE unsigned int prered; #endif @@ -52,37 +56,6 @@ struct block_hdr { static struct pool mp[MAX_POOLS]; static unsigned int nr_pools; static unsigned int last_pool; -static struct fio_mutex *lock; - -static inline void pool_lock(struct pool *pool) -{ - fio_mutex_down(pool->lock); -} - -static inline void pool_unlock(struct pool *pool) -{ - fio_mutex_up(pool->lock); -} - -static inline void global_read_lock(void) -{ - fio_mutex_down_read(lock); -} - -static inline void global_read_unlock(void) -{ - fio_mutex_up_read(lock); -} - -static inline void global_write_lock(void) -{ - fio_mutex_down_write(lock); -} - -static inline void global_write_unlock(void) -{ - fio_mutex_up_write(lock); -} static inline int ptr_valid(struct pool *pool, void *ptr) { @@ -91,13 +64,13 @@ static inline int ptr_valid(struct pool *pool, void *ptr) return (ptr >= pool->map) && (ptr < pool->map + pool_size); } -static inline unsigned int size_to_blocks(unsigned int size) +static inline size_t size_to_blocks(size_t size) { return (size + SMALLOC_BPB - 1) / SMALLOC_BPB; } static int blocks_iter(struct pool *pool, unsigned int pool_idx, - unsigned int idx, unsigned int nr_blocks, + unsigned int idx, size_t nr_blocks, int (*func)(unsigned int *map, unsigned int mask)) { @@ -152,19 +125,19 @@ static int mask_set(unsigned int *map, unsigned int mask) } static int blocks_free(struct pool *pool, unsigned int pool_idx, - unsigned int idx, unsigned int nr_blocks) + unsigned int idx, size_t nr_blocks) { return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp); } static void set_blocks(struct pool *pool, unsigned int pool_idx, - unsigned int idx, unsigned int nr_blocks) + unsigned int idx, size_t nr_blocks) { blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set); } static void clear_blocks(struct pool *pool, unsigned int pool_idx, - unsigned 
int idx, unsigned int nr_blocks) + unsigned int idx, size_t nr_blocks) { blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear); } @@ -172,20 +145,16 @@ static void clear_blocks(struct pool *pool, unsigned int pool_idx, static int find_next_zero(int word, int start) { assert(word != -1U); - word >>= (start + 1); - return ffz(word) + start + 1; + word >>= start; + return ffz(word) + start; } static int add_pool(struct pool *pool, unsigned int alloc_size) { - int fd, bitmap_blocks; - char file[] = "/tmp/.fio_smalloc.XXXXXX"; + int bitmap_blocks; + int mmap_flags; void *ptr; - fd = mkstemp(file); - if (fd < 0) - goto out_close; - #ifdef SMALLOC_REDZONE alloc_size += sizeof(unsigned int); #endif @@ -202,60 +171,49 @@ static int add_pool(struct pool *pool, unsigned int alloc_size) pool->nr_blocks = bitmap_blocks; pool->free_blocks = bitmap_blocks * SMALLOC_BPB; -#ifdef FIO_HAVE_FALLOCATE - { - int ret; - - ret = posix_fallocate(fd, 0, alloc_size); - if (ret > 0) { - fprintf(stderr, "posix_fallocate pool file failed: %s\n", strerror(ret)); - goto out_unlink; - } - } + mmap_flags = OS_MAP_ANON; +#ifdef CONFIG_ESX + mmap_flags |= MAP_PRIVATE; +#else + mmap_flags |= MAP_SHARED; #endif + ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, mmap_flags, -1, 0); - if (ftruncate(fd, alloc_size) < 0) - goto out_unlink; - - ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (ptr == MAP_FAILED) - goto out_unlink; + goto out_fail; - memset(ptr, 0, alloc_size); pool->map = ptr; pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL); + memset(pool->bitmap, 0, bitmap_blocks * sizeof(unsigned int)); - pool->lock = fio_mutex_init(1); + pool->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED); if (!pool->lock) - goto out_unlink; - - /* - * Unlink pool file now. It wont get deleted until the fd is closed, - * which happens both for cleanup or unexpected quit. This way we - * don't leave temp files around in case of a crash. - */ - unlink(file); - pool->fd = fd; + goto out_fail; nr_pools++; return 0; -out_unlink: - fprintf(stderr, "smalloc: failed adding pool\n"); +out_fail: + log_err("smalloc: failed adding pool\n"); if (pool->map) munmap(pool->map, pool->mmap_size); - unlink(file); -out_close: - close(fd); return 1; } void sinit(void) { - int ret; + int i, ret; - lock = fio_mutex_rw_init(); - ret = add_pool(&mp[0], INITIAL_SIZE); - assert(!ret); + for (i = 0; i < MAX_POOLS; i++) { + ret = add_pool(&mp[i], smalloc_pool_size); + if (ret) + break; + } + + /* + * If we added at least one pool, we should be OK for most + * cases. 
+ */ + assert(i); } static void cleanup_pool(struct pool *pool) @@ -264,7 +222,6 @@ static void cleanup_pool(struct pool *pool) * This will also remove the temporary file we used as a backing * store, it was already unlinked */ - close(pool->fd); munmap(pool->map, pool->mmap_size); if (pool->lock) @@ -277,17 +234,14 @@ void scleanup(void) for (i = 0; i < nr_pools; i++) cleanup_pool(&mp[i]); - - if (lock) - fio_mutex_remove(lock); } #ifdef SMALLOC_REDZONE static void *postred_ptr(struct block_hdr *hdr) { - unsigned long ptr; + uintptr_t ptr; - ptr = (unsigned long) hdr + hdr->size - sizeof(unsigned int); + ptr = (uintptr_t) hdr + hdr->size - sizeof(unsigned int); ptr = (ptr + int_mask) & ~int_mask; return (void *) ptr; @@ -306,14 +260,14 @@ static void sfree_check_redzone(struct block_hdr *hdr) unsigned int *postred = postred_ptr(hdr); if (hdr->prered != SMALLOC_PRE_RED) { - fprintf(stderr, "smalloc pre redzone destroyed!\n"); - fprintf(stderr, " ptr=%p, prered=%x, expected %x\n", + log_err("smalloc pre redzone destroyed!\n" + " ptr=%p, prered=%x, expected %x\n", hdr, hdr->prered, SMALLOC_PRE_RED); assert(0); } if (*postred != SMALLOC_POST_RED) { - fprintf(stderr, "smalloc post redzone destroyed!\n"); - fprintf(stderr, " ptr=%p, postred=%x, expected %x\n", + log_err("smalloc post redzone destroyed!\n" + " ptr=%p, postred=%x, expected %x\n", hdr, *postred, SMALLOC_POST_RED); assert(0); } @@ -348,12 +302,12 @@ static void sfree_pool(struct pool *pool, void *ptr) i = offset / SMALLOC_BPL; idx = (offset % SMALLOC_BPL) / SMALLOC_BPB; - pool_lock(pool); + fio_mutex_down(pool->lock); clear_blocks(pool, i, idx, size_to_blocks(hdr->size)); if (i < pool->next_non_full) pool->next_non_full = i; pool->free_blocks += size_to_blocks(hdr->size); - pool_unlock(pool); + fio_mutex_up(pool->lock); } void sfree(void *ptr) @@ -364,8 +318,6 @@ void sfree(void *ptr) if (!ptr) return; - global_read_lock(); - for (i = 0; i < nr_pools; i++) { if (ptr_valid(&mp[i], ptr)) { pool = &mp[i]; @@ -373,21 +325,23 @@ void sfree(void *ptr) } } - global_read_unlock(); + if (pool) { + sfree_pool(pool, ptr); + return; + } - assert(pool); - sfree_pool(pool, ptr); + log_err("smalloc: ptr %p not from smalloc pool\n", ptr); } -static void *__smalloc_pool(struct pool *pool, unsigned int size) +static void *__smalloc_pool(struct pool *pool, size_t size) { - unsigned int nr_blocks; + size_t nr_blocks; unsigned int i; unsigned int offset; unsigned int last_idx; void *ret = NULL; - pool_lock(pool); + fio_mutex_down(pool->lock); nr_blocks = size_to_blocks(size); if (nr_blocks > pool->free_blocks) @@ -430,13 +384,13 @@ static void *__smalloc_pool(struct pool *pool, unsigned int size) ret = pool->map + offset; } fail: - pool_unlock(pool); + fio_mutex_up(pool->lock); return ret; } -static void *smalloc_pool(struct pool *pool, unsigned int size) +static void *smalloc_pool(struct pool *pool, size_t size) { - unsigned int alloc_size = size + sizeof(struct block_hdr); + size_t alloc_size = size + sizeof(struct block_hdr); void *ptr; /* @@ -462,47 +416,48 @@ static void *smalloc_pool(struct pool *pool, unsigned int size) return ptr; } -void *smalloc(unsigned int size) +void *smalloc(size_t size) { - unsigned int i; + unsigned int i, end_pool; + + if (size != (unsigned int) size) + return NULL; - global_write_lock(); i = last_pool; + end_pool = nr_pools; do { - for (; i < nr_pools; i++) { + for (; i < end_pool; i++) { void *ptr = smalloc_pool(&mp[i], size); if (ptr) { last_pool = i; - global_write_unlock(); return ptr; } } if (last_pool) { - 
last_pool = 0; + end_pool = last_pool; + last_pool = i = 0; continue; } - if (nr_pools + 1 > MAX_POOLS) - break; - else { - i = nr_pools; - if (add_pool(&mp[nr_pools], size)) - goto out; - } + break; } while (1); -out: - global_write_unlock(); return NULL; } +void *scalloc(size_t nmemb, size_t size) +{ + return smalloc(nmemb * size); +} + char *smalloc_strdup(const char *str) { - char *ptr; + char *ptr = NULL; ptr = smalloc(strlen(str) + 1); - strcpy(ptr, str); + if (ptr) + strcpy(ptr, str); return ptr; }
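
The central change above drops the mkstemp()/ftruncate()/unlink() temp-file backing and maps each pool as anonymous shared memory instead. Below is a minimal standalone sketch of that mmap() pattern, not fio code: it uses plain MAP_ANONYMOUS | MAP_SHARED rather than fio's OS_MAP_ANON wrapper, ignores the CONFIG_ESX MAP_PRIVATE case, and the 16 MB size and the fork() check are only illustration.

/*
 * Sketch of the anonymous shared mapping that add_pool() now relies on.
 * Assumes a Linux/BSD-style mmap(); this is not part of fio itself.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	size_t pool_size = 16 * 1024 * 1024;	/* matches the new INITIAL_SIZE */
	void *pool;

	/* No backing file and no unlink() needed: fd is -1, offset is 0 */
	pool = mmap(NULL, pool_size, PROT_READ | PROT_WRITE,
		    MAP_ANONYMOUS | MAP_SHARED, -1, 0);
	if (pool == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * Anonymous memory is already zero-filled, which is why the patch
	 * can drop the full memset() and clear only the bitmap.
	 */
	if (fork() == 0) {
		/* Child writes into the shared pool... */
		strcpy(pool, "hello from child");
		_exit(0);
	}
	wait(NULL);

	/* ...and the parent sees it, as fio's forked workers would. */
	printf("%s\n", (char *) pool);

	munmap(pool, pool_size);
	return 0;
}

Because the mapping is anonymous, nothing is left in /tmp if fio crashes, which is exactly what the removed unlink()-early comment was working around.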
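
For orientation, here is a hedged sketch of how the allocator interface looks to a caller after this patch. It assumes it is built inside the fio tree where smalloc.h declares these functions; the example() wrapper, struct foo, and the "job1" string are made up for illustration and the real call sites in fio differ.

/*
 * Usage sketch for the post-patch API: sinit(), smalloc(), scalloc(),
 * smalloc_strdup(), sfree(), scleanup(). Illustrative only.
 */
#include <stdio.h>
#include "smalloc.h"

void example(void)
{
	struct foo { int a, b; } *p;
	char *name;

	sinit();			/* sets up the static pools up front */

	p = scalloc(1, sizeof(*p));	/* scalloc() is new in this diff */
	name = smalloc_strdup("job1");	/* may now return NULL */

	if (!p || !name)		/* smalloc() returns NULL once all pools are full */
		fprintf(stderr, "smalloc pool exhausted\n");

	sfree(name);
	sfree(p);			/* sfree(NULL) is still a no-op if an alloc failed */

	scleanup();
}

The sketch leans on the two behavioural changes visible in the diff: allocations fail with NULL instead of growing past a fixed set of pools, and sfree() on a pointer outside every pool logs an error rather than asserting.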