X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=smalloc.c;h=e82ef1da0b59faab23a706db1257808c3fb10bbb;hp=bad0bcb2a614d1838eb137d7a21934d7b85262c5;hb=c97bd0fa0f68d924f4e6f3c480f380c4ca20b872;hpb=dcb69098630845b53ebb8034014d44b409e16f9e

diff --git a/smalloc.c b/smalloc.c
index bad0bcb2..e82ef1da 100644
--- a/smalloc.c
+++ b/smalloc.c
@@ -12,8 +12,8 @@
 #include 
 
 #include "mutex.h"
+#include "arch/arch.h"
 
-#define MP_SAFE			/* define to make thread safe */
 #define SMALLOC_REDZONE		/* define to detect memory corruption */
 
 #define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
@@ -21,7 +21,7 @@
 #define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)
 
 #define INITIAL_SIZE	1024*1024	/* new pool size */
-#define MAX_POOLS	4		/* maximum number of pools to setup */
+#define MAX_POOLS	128		/* maximum number of pools to setup */
 
 #define SMALLOC_PRE_RED		0xdeadbeefU
 #define SMALLOC_POST_RED	0x5aa55aa5U
@@ -36,7 +36,6 @@ struct pool {
 	unsigned int nr_blocks;		/* total blocks */
 	unsigned int next_non_full;
 	int fd;				/* memory backing fd */
-	char file[PATH_MAX];		/* filename for fd */
 	unsigned int mmap_size;
 };
 
@@ -54,38 +53,32 @@ static struct fio_mutex *lock;
 
 static inline void pool_lock(struct pool *pool)
 {
-	if (pool->lock)
-		fio_mutex_down(pool->lock);
+	fio_mutex_down(pool->lock);
 }
 
 static inline void pool_unlock(struct pool *pool)
 {
-	if (pool->lock)
-		fio_mutex_up(pool->lock);
+	fio_mutex_up(pool->lock);
 }
 
 static inline void global_read_lock(void)
 {
-	if (lock)
-		fio_mutex_down_read(lock);
+	fio_mutex_down_read(lock);
 }
 
 static inline void global_read_unlock(void)
 {
-	if (lock)
-		fio_mutex_up_read(lock);
+	fio_mutex_up_read(lock);
 }
 
 static inline void global_write_lock(void)
 {
-	if (lock)
-		fio_mutex_down_write(lock);
+	fio_mutex_down_write(lock);
 }
 
 static inline void global_write_unlock(void)
 {
-	if (lock)
-		fio_mutex_up_write(lock);
+	fio_mutex_up_write(lock);
 }
 
 static inline int ptr_valid(struct pool *pool, void *ptr)
@@ -173,48 +166,20 @@ static void clear_blocks(struct pool *pool, unsigned int pool_idx,
 	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
 }
 
-static inline int __ffs(int word)
-{
-	int r = 0;
-
-	if (!(word & 0xffff)) {
-		word >>= 16;
-		r += 16;
-	}
-	if (!(word & 0xff)) {
-		word >>= 8;
-		r += 8;
-	}
-	if (!(word & 0xf)) {
-		word >>= 4;
-		r += 4;
-	}
-	if (!(word & 3)) {
-		word >>= 2;
-		r += 2;
-	}
-	if (!(word & 1)) {
-		word >>= 1;
-		r += 1;
-	}
-
-	return r;
-}
-
 static int find_next_zero(int word, int start)
 {
 	assert(word != -1U);
 	word >>= (start + 1);
-	return __ffs(~word) + start + 1;
+	return ffz(word) + start + 1;
 }
 
 static int add_pool(struct pool *pool, unsigned int alloc_size)
 {
-	void *ptr;
 	int fd, bitmap_blocks;
+	char file[] = "/tmp/.fio_smalloc.XXXXXX";
+	void *ptr;
 
-	strcpy(pool->file, "/tmp/.fio_smalloc.XXXXXX");
-	fd = mkstemp(pool->file);
+	fd = mkstemp(file);
 	if (fd < 0)
 		goto out_close;
 
@@ -245,26 +210,27 @@ static int add_pool(struct pool *pool, unsigned int alloc_size)
 	pool->map = ptr;
 	pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);
 
-#ifdef MP_SAFE
 	pool->lock = fio_mutex_init(1);
 	if (!pool->lock)
 		goto out_unlink;
-#endif
 
+	/*
+	 * Unlink pool file now. It wont get deleted until the fd is closed,
+	 * which happens both for cleanup or unexpected quit. This way we
+	 * don't leave temp files around in case of a crash.
+	 */
+	unlink(file);
 	pool->fd = fd;
 
-	global_write_lock();
 	nr_pools++;
-	global_write_unlock();
 	return 0;
 out_unlink:
 	fprintf(stderr, "smalloc: failed adding pool\n");
 	if (pool->map)
 		munmap(pool->map, pool->mmap_size);
-	unlink(pool->file);
+	unlink(file);
 out_close:
-	if (fd >= 0)
-		close(fd);
+	close(fd);
 	return 1;
 }
 
@@ -272,16 +238,17 @@ void sinit(void)
 {
 	int ret;
 
-#ifdef MP_SAFE
 	lock = fio_mutex_rw_init();
-#endif
 	ret = add_pool(&mp[0], INITIAL_SIZE);
 	assert(!ret);
 }
 
 static void cleanup_pool(struct pool *pool)
 {
-	unlink(pool->file);
+	/*
+	 * This will also remove the temporary file we used as a backing
+	 * store, it was already unlinked
+	 */
 	close(pool->fd);
 	munmap(pool->map, pool->mmap_size);
 
@@ -300,19 +267,17 @@ void scleanup(void)
 		fio_mutex_remove(lock);
 }
 
+#ifdef SMALLOC_REDZONE
 static void fill_redzone(struct block_hdr *hdr)
 {
-#ifdef SMALLOC_REDZONE
 	unsigned int *postred = (void *) hdr + hdr->size - sizeof(unsigned int);
 
 	hdr->prered = SMALLOC_PRE_RED;
 	*postred = SMALLOC_POST_RED;
-#endif
 }
 
 static void sfree_check_redzone(struct block_hdr *hdr)
 {
-#ifdef SMALLOC_REDZONE
 	unsigned int *postred = (void *) hdr + hdr->size - sizeof(unsigned int);
 
 	if (hdr->prered != SMALLOC_PRE_RED) {
@@ -327,9 +292,17 @@ static void sfree_check_redzone(struct block_hdr *hdr)
 			hdr, *postred, SMALLOC_POST_RED);
 		assert(0);
 	}
-#endif
+}
+#else
+static void fill_redzone(struct block_hdr *hdr)
+{
 }
 
+static void sfree_check_redzone(struct block_hdr *hdr)
+{
+}
+#endif
+
 static void sfree_pool(struct pool *pool, void *ptr)
 {
 	struct block_hdr *hdr;
@@ -438,26 +411,24 @@ fail:
 
 static void *smalloc_pool(struct pool *pool, unsigned int size)
 {
-	struct block_hdr *hdr;
-	unsigned int alloc_size;
+	unsigned int alloc_size = size + sizeof(struct block_hdr);
 	void *ptr;
 
-	alloc_size = size + sizeof(*hdr);
 #ifdef SMALLOC_REDZONE
 	alloc_size += sizeof(unsigned int);
 #endif
 
 	ptr = __smalloc_pool(pool, alloc_size);
-	if (!ptr)
-		return NULL;
+	if (ptr) {
+		struct block_hdr *hdr = ptr;
 
-	hdr = ptr;
-	hdr->size = alloc_size;
-	ptr += sizeof(*hdr);
+		hdr->size = alloc_size;
+		fill_redzone(hdr);
 
-	fill_redzone(hdr);
+		ptr += sizeof(*hdr);
+		memset(ptr, 0, size);
+	}
 
-	memset(ptr, 0, size);
 	return ptr;
 }
 
@@ -465,7 +436,7 @@ void *smalloc(unsigned int size)
 {
 	unsigned int i;
 
-	global_read_lock();
+	global_write_lock();
 	i = last_pool;
 
 	do {
@@ -474,7 +445,7 @@ void *smalloc(unsigned int size)
 
 			if (ptr) {
 				last_pool = i;
-				global_read_unlock();
+				global_write_unlock();
 				return ptr;
 			}
 		}
@@ -487,15 +458,13 @@ void *smalloc(unsigned int size)
 			break;
 		else {
 			i = nr_pools;
-			global_read_unlock();
 			if (add_pool(&mp[nr_pools], size))
 				goto out;
-			global_read_lock();
 		}
 	} while (1);
 
-	global_read_unlock();
 out:
+	global_write_unlock();
 	return NULL;
 }
 
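
For reference, a minimal standalone sketch of the mkstemp() + immediate unlink() idiom that the new comment in add_pool() describes: the backing file's directory entry is dropped right after creation, so the open fd and the shared mapping keep the storage alive while nothing is left behind in /tmp if the process crashes. This is not part of the patch; the helper name anon_shared_backing() and the template path are made up for illustration, and it assumes an ordinary POSIX environment.

/*
 * Hypothetical helper, sketch only; not part of smalloc.c.
 */
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

static void *anon_shared_backing(size_t size, int *out_fd)
{
	char file[] = "/tmp/.smalloc_sketch.XXXXXX";
	void *ptr;
	int fd;

	fd = mkstemp(file);		/* create a unique backing file */
	if (fd < 0)
		return NULL;

	/*
	 * Drop the directory entry right away; the open fd (and later the
	 * mapping) keeps the storage alive until close()/munmap(), and a
	 * crash can no longer leave a stale file in /tmp.
	 */
	unlink(file);

	if (ftruncate(fd, size) < 0) {
		close(fd);
		return NULL;
	}

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ptr == MAP_FAILED) {
		close(fd);
		return NULL;
	}

	*out_fd = fd;
	return ptr;
}

A side effect of unlinking early, visible in the patch itself, is that the filename no longer needs to be kept around: the char file[PATH_MAX] member of struct pool is dropped, and cleanup_pool() only has to close() and munmap().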