X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=smalloc.c;h=a2ad25a0a0d03bf5010c743325ed17e594e0fb83;hp=5047cda5e3438c8215a3bb549577f5a81e5b0a29;hb=fd98fb689d5ad7e9977461e961fff3fdd37f9cb8;hpb=9c3e13e3314da394698ca32f21cc46d46b7cfe47

diff --git a/smalloc.c b/smalloc.c
index 5047cda5..a2ad25a0 100644
--- a/smalloc.c
+++ b/smalloc.c
@@ -3,18 +3,11 @@
  * that can be shared across processes and threads
  */
 #include
-#include
-#include
 #include
 #include
-#include
-#include
-#include
-#include
-#include
-
-#include "mutex.h"
-#include "arch/arch.h"
+
+#include "fio.h"
+#include "fio_sem.h"
 #include "os/os.h"
 #include "smalloc.h"
 #include "log.h"
@@ -26,7 +19,9 @@
 #define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)
 
 #define INITIAL_SIZE	16*1024*1024	/* new pool size */
-#define MAX_POOLS	8		/* maximum number of pools to setup */
+#define INITIAL_POOLS	8		/* maximum number of pools to setup */
+
+#define MAX_POOLS	16
 
 #define SMALLOC_PRE_RED		0xdeadbeefU
 #define SMALLOC_POST_RED	0x5aa55aa5U
@@ -37,7 +32,7 @@ static const int int_mask = sizeof(int) - 1;
 #endif
 
 struct pool {
-	struct fio_mutex *lock;			/* protects this pool */
+	struct fio_sem *lock;			/* protects this pool */
 	void *map;				/* map of blocks */
 	unsigned int *bitmap;			/* blocks free/busy map */
 	size_t free_blocks;		/* free blocks */
@@ -56,37 +51,6 @@ struct block_hdr {
 static struct pool mp[MAX_POOLS];
 static unsigned int nr_pools;
 static unsigned int last_pool;
-static struct fio_rwlock *lock;
-
-static inline void pool_lock(struct pool *pool)
-{
-	fio_mutex_down(pool->lock);
-}
-
-static inline void pool_unlock(struct pool *pool)
-{
-	fio_mutex_up(pool->lock);
-}
-
-static inline void global_read_lock(void)
-{
-	fio_rwlock_read(lock);
-}
-
-static inline void global_read_unlock(void)
-{
-	fio_rwlock_unlock(lock);
-}
-
-static inline void global_write_lock(void)
-{
-	fio_rwlock_write(lock);
-}
-
-static inline void global_write_unlock(void)
-{
-	fio_rwlock_unlock(lock);
-}
 
 static inline int ptr_valid(struct pool *pool, void *ptr)
 {
@@ -180,12 +144,15 @@ static int find_next_zero(int word, int start)
 	return ffz(word) + start;
 }
 
-static int add_pool(struct pool *pool, unsigned int alloc_size)
+static bool add_pool(struct pool *pool, unsigned int alloc_size)
 {
 	int bitmap_blocks;
 	int mmap_flags;
 	void *ptr;
 
+	if (nr_pools == MAX_POOLS)
+		return false;
+
 #ifdef SMALLOC_REDZONE
 	alloc_size += sizeof(unsigned int);
 #endif
@@ -214,31 +181,30 @@ static int add_pool(struct pool *pool, unsigned int alloc_size)
 		goto out_fail;
 
 	pool->map = ptr;
-	pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);
+	pool->bitmap = (unsigned int *)((char *) ptr + (pool->nr_blocks * SMALLOC_BPL));
 	memset(pool->bitmap, 0, bitmap_blocks * sizeof(unsigned int));
 
-	pool->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
+	pool->lock = fio_sem_init(FIO_SEM_UNLOCKED);
 	if (!pool->lock)
 		goto out_fail;
 
 	nr_pools++;
-	return 0;
+	return true;
 out_fail:
 	log_err("smalloc: failed adding pool\n");
 	if (pool->map)
 		munmap(pool->map, pool->mmap_size);
-	return 1;
+	return false;
 }
 
 void sinit(void)
 {
-	int i, ret;
-
-	lock = fio_rwlock_init();
+	bool ret;
+	int i;
 
-	for (i = 0; i < MAX_POOLS; i++) {
-		ret = add_pool(&mp[i], INITIAL_SIZE);
-		if (ret)
+	for (i = 0; i < INITIAL_POOLS; i++) {
+		ret = add_pool(&mp[nr_pools], smalloc_pool_size);
+		if (!ret)
 			break;
 	}
 
@@ -258,7 +224,7 @@ static void cleanup_pool(struct pool *pool)
 		munmap(pool->map, pool->mmap_size);
 
 	if (pool->lock)
-		fio_mutex_remove(pool->lock);
+		fio_sem_remove(pool->lock);
 }
 
 void scleanup(void)
@@ -267,9 +233,6 @@ void scleanup(void)
 
 	for (i = 0; i < nr_pools; i++)
 		cleanup_pool(&mp[i]);
-
-	if (lock)
-		fio_rwlock_remove(lock);
 }
 
 #ifdef SMALLOC_REDZONE
@@ -278,7 +241,7 @@ static void *postred_ptr(struct block_hdr *hdr)
 	uintptr_t ptr;
 
 	ptr = (uintptr_t) hdr + hdr->size - sizeof(unsigned int);
-	ptr = (ptr + int_mask) & ~int_mask;
+	ptr = (uintptr_t) PTR_ALIGN(ptr, int_mask);
 
 	return (void *) ptr;
 }
@@ -338,12 +301,12 @@ static void sfree_pool(struct pool *pool, void *ptr)
 	i = offset / SMALLOC_BPL;
 	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;
 
-	pool_lock(pool);
+	fio_sem_down(pool->lock);
 	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
 	if (i < pool->next_non_full)
 		pool->next_non_full = i;
 	pool->free_blocks += size_to_blocks(hdr->size);
-	pool_unlock(pool);
+	fio_sem_up(pool->lock);
 }
 
 void sfree(void *ptr)
@@ -354,8 +317,6 @@ void sfree(void *ptr)
 	if (!ptr)
 		return;
 
-	global_read_lock();
-
 	for (i = 0; i < nr_pools; i++) {
 		if (ptr_valid(&mp[i], ptr)) {
 			pool = &mp[i];
@@ -363,8 +324,6 @@ void sfree(void *ptr)
 		}
 	}
 
-	global_read_unlock();
-
 	if (pool) {
 		sfree_pool(pool, ptr);
 		return;
@@ -381,7 +340,7 @@ static void *__smalloc_pool(struct pool *pool, size_t size)
 	unsigned int last_idx;
 	void *ret = NULL;
 
-	pool_lock(pool);
+	fio_sem_down(pool->lock);
 
 	nr_blocks = size_to_blocks(size);
 	if (nr_blocks > pool->free_blocks)
@@ -424,7 +383,7 @@ static void *__smalloc_pool(struct pool *pool, size_t size)
 		ret = pool->map + offset;
 	}
 fail:
-	pool_unlock(pool);
+	fio_sem_up(pool->lock);
 	return ret;
 }
 
@@ -463,7 +422,6 @@ void *smalloc(size_t size)
 	if (size != (unsigned int) size)
 		return NULL;
 
-	global_write_lock();
 	i = last_pool;
 	end_pool = nr_pools;
 
@@ -473,7 +431,6 @@ void *smalloc(size_t size)
 
 				if (ptr) {
 					last_pool = i;
-					global_write_unlock();
 					return ptr;
 				}
 			}
@@ -486,7 +443,8 @@ void *smalloc(size_t size)
 			break;
 	} while (1);
 
-	global_write_unlock();
+	log_err("smalloc: OOM. Consider using --alloc-size to increase the "
+		"shared memory available.\n");
 	return NULL;
 }
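
For readers skimming the patch, the behavioral change is that pool setup is no longer all up front: sinit() now creates INITIAL_POOLS pools of smalloc_pool_size, and when an allocation cannot be satisfied, smalloc() keeps adding pools until add_pool() hits the MAX_POOLS cap, at which point the new OOM message points at --alloc-size. Below is a minimal, self-contained sketch of that grow-on-demand fallback; it is not fio's code. try_pool(), smalloc_like(), main() and the simplified add_pool() are hypothetical stand-ins, while INITIAL_POOLS, MAX_POOLS, nr_pools, smalloc_pool_size's role and the error text are taken from the patch.

/*
 * Simplified model of the allocation fallback introduced by this diff:
 * scan existing pools, grow one pool at a time up to MAX_POOLS, then
 * report OOM with the --alloc-size hint. Not fio's implementation.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define INITIAL_POOLS	8	/* pools created by sinit() in the patch */
#define MAX_POOLS	16	/* hard cap checked by the new add_pool() */

static unsigned int nr_pools;

static bool add_pool(void)
{
	if (nr_pools == MAX_POOLS)	/* mirrors the new early return */
		return false;
	nr_pools++;			/* stand-in for mmap + bitmap setup */
	return true;
}

static void *try_pool(unsigned int i, size_t size)
{
	(void) i;
	(void) size;
	return NULL;			/* pretend every pool is full */
}

static void *smalloc_like(size_t size)
{
	unsigned int i;

	do {
		for (i = 0; i < nr_pools; i++) {
			void *ptr = try_pool(i, size);

			if (ptr)
				return ptr;
		}
		/* all existing pools are full: try to grow by one pool */
	} while (add_pool());

	fprintf(stderr, "smalloc: OOM. Consider using --alloc-size to "
		"increase the shared memory available.\n");
	return NULL;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < INITIAL_POOLS; i++)	/* what sinit() now does */
		if (!add_pool())
			break;

	return smalloc_like(4096) ? 0 : 1;
}

Under these assumptions the sketch exercises the same path as the patched smalloc(): with every pool reporting "full", it grows from 8 to 16 pools and then prints the OOM hint, which is why the old global rwlock bookkeeping could be dropped alongside the fio_mutex-to-fio_sem conversion of the per-pool lock.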