#include <limits.h>
#include "mutex.h"
+#include "arch/arch.h"
-#define MP_SAFE /* define to make thread safe */
#define SMALLOC_REDZONE /* define to detect memory corruption */
#define SMALLOC_BPB 32 /* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI (sizeof(unsigned int) * 8)
#define SMALLOC_BPL (SMALLOC_BPB * SMALLOC_BPI)
-#define INITIAL_SIZE 1024*1024 /* new pool size */
-#define MAX_POOLS 4 /* maximum number of pools to setup */
+#define INITIAL_SIZE 8192*1024 /* new pool size */
+#define MAX_POOLS 128 /* maximum number of pools to setup */
#define SMALLOC_PRE_RED 0xdeadbeefU
#define SMALLOC_POST_RED 0x5aa55aa5U
unsigned int smalloc_pool_size = INITIAL_SIZE;
+const int int_mask = sizeof(int) - 1;
struct pool {
struct fio_mutex *lock; /* protects this pool */
unsigned int nr_blocks; /* total blocks */
unsigned int next_non_full;
int fd; /* memory backing fd */
- char file[PATH_MAX]; /* filename for fd */
unsigned int mmap_size;
};
static inline void pool_lock(struct pool *pool)
{
- if (pool->lock)
- fio_mutex_down(pool->lock);
+ fio_mutex_down(pool->lock);
}
static inline void pool_unlock(struct pool *pool)
{
- if (pool->lock)
- fio_mutex_up(pool->lock);
+ fio_mutex_up(pool->lock);
}
static inline void global_read_lock(void)
{
- if (lock)
- fio_mutex_down_read(lock);
+ fio_mutex_down_read(lock);
}
static inline void global_read_unlock(void)
{
- if (lock)
- fio_mutex_up_read(lock);
+ fio_mutex_up_read(lock);
}
static inline void global_write_lock(void)
{
- if (lock)
- fio_mutex_down_write(lock);
+ fio_mutex_down_write(lock);
}
static inline void global_write_unlock(void)
{
- if (lock)
- fio_mutex_up_write(lock);
+ fio_mutex_up_write(lock);
}
static inline int ptr_valid(struct pool *pool, void *ptr)
return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}
-static int blocks_iter(unsigned int *map, unsigned int idx,
- unsigned int nr_blocks,
+static inline unsigned int size_to_blocks(unsigned int size)
+{
+ return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
+}
+
+static int blocks_iter(struct pool *pool, unsigned int pool_idx,
+ unsigned int idx, unsigned int nr_blocks,
int (*func)(unsigned int *map, unsigned int mask))
{
+
while (nr_blocks) {
unsigned int this_blocks, mask;
+ unsigned int *map;
+
+ if (pool_idx >= pool->nr_blocks)
+ return 0;
+
+ map = &pool->bitmap[pool_idx];
this_blocks = nr_blocks;
if (this_blocks + idx > SMALLOC_BPI) {
nr_blocks -= this_blocks;
idx = 0;
- map++;
+ pool_idx++;
}
return 1;
static int mask_clear(unsigned int *map, unsigned int mask)
{
+ assert((*map & mask) == mask);
*map &= ~mask;
return 1;
}
static int mask_set(unsigned int *map, unsigned int mask)
{
+ assert(!(*map & mask));
*map |= mask;
return 1;
}
-static int blocks_free(unsigned int *map, unsigned int idx,
- unsigned int nr_blocks)
+static int blocks_free(struct pool *pool, unsigned int pool_idx,
+ unsigned int idx, unsigned int nr_blocks)
{
- return blocks_iter(map, idx, nr_blocks, mask_cmp);
+ return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}
-static void set_blocks(unsigned int *map, unsigned int idx,
- unsigned int nr_blocks)
+static void set_blocks(struct pool *pool, unsigned int pool_idx,
+ unsigned int idx, unsigned int nr_blocks)
{
- blocks_iter(map, idx, nr_blocks, mask_set);
+ blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}
-static void clear_blocks(unsigned int *map, unsigned int idx,
- unsigned int nr_blocks)
+static void clear_blocks(struct pool *pool, unsigned int pool_idx,
+ unsigned int idx, unsigned int nr_blocks)
{
- blocks_iter(map, idx, nr_blocks, mask_clear);
-}
-
-static inline int __ffs(int word)
-{
- int r = 0;
-
- if (!(word & 0xffff)) {
- word >>= 16;
- r += 16;
- }
- if (!(word & 0xff)) {
- word >>= 8;
- r += 8;
- }
- if (!(word & 0xf)) {
- word >>= 4;
- r += 4;
- }
- if (!(word & 3)) {
- word >>= 2;
- r += 2;
- }
- if (!(word & 1)) {
- word >>= 1;
- r += 1;
- }
-
- return r;
+ blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}
static int find_next_zero(int word, int start)
{
assert(word != -1U);
word >>= (start + 1);
- return __ffs(~word) + start + 1;
+ return ffz(word) + start + 1;
}
static int add_pool(struct pool *pool, unsigned int alloc_size)
{
- void *ptr;
int fd, bitmap_blocks;
+ char file[] = "/tmp/.fio_smalloc.XXXXXX";
+ void *ptr;
- strcpy(pool->file, "/tmp/.fio_smalloc.XXXXXX");
- fd = mkstemp(pool->file);
+ fd = mkstemp(file);
if (fd < 0)
goto out_close;
pool->map = ptr;
pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);
-#ifdef MP_SAFE
pool->lock = fio_mutex_init(1);
if (!pool->lock)
goto out_unlink;
-#endif
+ /*
+ * Unlink pool file now. It won't get deleted until the fd is closed,
+ * which happens both for cleanup and for an unexpected quit. This way
+ * we don't leave temp files around in case of a crash.
+ */
+ unlink(file);
pool->fd = fd;
- global_write_lock();
nr_pools++;
- global_write_unlock();
return 0;
out_unlink:
fprintf(stderr, "smalloc: failed adding pool\n");
if (pool->map)
munmap(pool->map, pool->mmap_size);
- unlink(pool->file);
+ unlink(file);
out_close:
- if (fd >= 0)
- close(fd);
+ close(fd);
return 1;
}
{
int ret;
-#ifdef MP_SAFE
lock = fio_mutex_rw_init();
-#endif
ret = add_pool(&mp[0], INITIAL_SIZE);
assert(!ret);
}
static void cleanup_pool(struct pool *pool)
{
- unlink(pool->file);
+ /*
+ * This will also remove the temporary file we used as a backing
+ * store, since it was already unlinked earlier.
+ */
close(pool->fd);
munmap(pool->map, pool->mmap_size);
fio_mutex_remove(lock);
}
+#ifdef SMALLOC_REDZONE
+static void *postred_ptr(struct block_hdr *hdr)
+{
+ unsigned long ptr;
+
+ ptr = (unsigned long) hdr + hdr->size - sizeof(unsigned int);
+ ptr = (ptr + int_mask) & ~int_mask;
+
+ return (void *) ptr;
+}
+
static void fill_redzone(struct block_hdr *hdr)
{
-#ifdef SMALLOC_REDZONE
- unsigned int *postred = (void *) hdr + hdr->size - sizeof(unsigned int);
+ unsigned int *postred = postred_ptr(hdr);
hdr->prered = SMALLOC_PRE_RED;
*postred = SMALLOC_POST_RED;
-#endif
}
static void sfree_check_redzone(struct block_hdr *hdr)
{
-#ifdef SMALLOC_REDZONE
- unsigned int *postred = (void *) hdr + hdr->size - sizeof(unsigned int);
+ unsigned int *postred = postred_ptr(hdr);
if (hdr->prered != SMALLOC_PRE_RED) {
fprintf(stderr, "smalloc pre redzone destroyed!\n");
hdr, *postred, SMALLOC_POST_RED);
assert(0);
}
-#endif
+}
+#else
+static void fill_redzone(struct block_hdr *hdr)
+{
}
+static void sfree_check_redzone(struct block_hdr *hdr)
+{
+}
+#endif
+
static void sfree_pool(struct pool *pool, void *ptr)
{
struct block_hdr *hdr;
idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;
pool_lock(pool);
- clear_blocks(&pool->bitmap[i], idx, size_to_blocks(hdr->size));
+ clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
if (i < pool->next_non_full)
pool->next_non_full = i;
pool->free_blocks += size_to_blocks(hdr->size);
sfree_pool(pool, ptr);
}
-static inline unsigned int size_to_blocks(unsigned int size)
-{
- return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
-}
-
static void *__smalloc_pool(struct pool *pool, unsigned int size)
{
unsigned int nr_blocks;
}
idx = find_next_zero(pool->bitmap[i], last_idx);
- if (!blocks_free(&pool->bitmap[i], idx, nr_blocks)) {
+ if (!blocks_free(pool, i, idx, nr_blocks)) {
idx += nr_blocks;
if (idx < SMALLOC_BPI)
last_idx = idx;
}
continue;
}
- set_blocks(&pool->bitmap[i], idx, nr_blocks);
+ set_blocks(pool, i, idx, nr_blocks);
offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
break;
}
static void *smalloc_pool(struct pool *pool, unsigned int size)
{
- struct block_hdr *hdr;
- unsigned int alloc_size;
+ unsigned int alloc_size = size + sizeof(struct block_hdr);
void *ptr;
- alloc_size = size + sizeof(*hdr);
+ /*
+ * Round to int alignment, so that the postred pointer will
+ * be naturally aligned as well.
+ */
#ifdef SMALLOC_REDZONE
alloc_size += sizeof(unsigned int);
+ alloc_size = (alloc_size + int_mask) & ~int_mask;
#endif
ptr = __smalloc_pool(pool, alloc_size);
- if (!ptr) {
- printf("failed allocating %u\n", alloc_size);
- return NULL;
- }
+ if (ptr) {
+ struct block_hdr *hdr = ptr;
- hdr = ptr;
- hdr->size = alloc_size;
- ptr += sizeof(*hdr);
+ hdr->size = alloc_size;
+ fill_redzone(hdr);
- fill_redzone(hdr);
+ ptr += sizeof(*hdr);
+ memset(ptr, 0, size);
+ }
- memset(ptr, 0, size);
return ptr;
}
{
unsigned int i;
- global_read_lock();
+ global_write_lock();
i = last_pool;
do {
if (ptr) {
last_pool = i;
- global_read_unlock();
+ global_write_unlock();
return ptr;
}
}
break;
else {
i = nr_pools;
- global_read_unlock();
if (add_pool(&mp[nr_pools], size))
goto out;
- global_read_lock();
}
} while (1);
- global_read_unlock();
out:
+ global_write_unlock();
return NULL;
}