#include "mutex.h"
#include "arch/arch.h"
#include "os/os.h"
+#include "smalloc.h"
+#include "log.h"
#define SMALLOC_REDZONE /* define to detect memory corruption */
#define SMALLOC_BPI (sizeof(unsigned int) * 8)
#define SMALLOC_BPL (SMALLOC_BPB * SMALLOC_BPI)
-#define INITIAL_SIZE 8192*1024 /* new pool size */
-#define MAX_POOLS 128 /* maximum number of pools to setup */
+#define INITIAL_SIZE 16*1024*1024 /* new pool size */
+#define MAX_POOLS 8 /* maximum number of pools to setup */
#define SMALLOC_PRE_RED 0xdeadbeefU
#define SMALLOC_POST_RED 0x5aa55aa5U
unsigned int smalloc_pool_size = INITIAL_SIZE;
-const int int_mask = sizeof(int) - 1;
+#ifdef SMALLOC_REDZONE
+static const int int_mask = sizeof(int) - 1;
+#endif
struct pool {
struct fio_mutex *lock; /* protects this pool */
static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;
-static struct fio_rwlock *lock;
-
-static inline void pool_lock(struct pool *pool)
-{
- fio_mutex_down(pool->lock);
-}
-
-static inline void pool_unlock(struct pool *pool)
-{
- fio_mutex_up(pool->lock);
-}
-
-static inline void global_read_lock(void)
-{
- fio_rwlock_read(lock);
-}
-
-static inline void global_read_unlock(void)
-{
- fio_rwlock_unlock(lock);
-}
-
-static inline void global_write_lock(void)
-{
- fio_rwlock_write(lock);
-}
-
-static inline void global_write_unlock(void)
-{
- fio_rwlock_unlock(lock);
-}
static inline int ptr_valid(struct pool *pool, void *ptr)
{
static int add_pool(struct pool *pool, unsigned int alloc_size)
{
int bitmap_blocks;
+ int mmap_flags;
void *ptr;
#ifdef SMALLOC_REDZONE
pool->nr_blocks = bitmap_blocks;
pool->free_blocks = bitmap_blocks * SMALLOC_BPB;
- ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE,
- MAP_SHARED | OS_MAP_ANON, -1, 0);
+ mmap_flags = OS_MAP_ANON;
+#ifdef CONFIG_ESX
+ mmap_flags |= MAP_PRIVATE;
+#else
+ mmap_flags |= MAP_SHARED;
+#endif
+ ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, mmap_flags, -1, 0);
+
if (ptr == MAP_FAILED)
goto out_fail;
- memset(ptr, 0, alloc_size);
pool->map = ptr;
pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);
+ memset(pool->bitmap, 0, bitmap_blocks * sizeof(unsigned int));
pool->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
if (!pool->lock)
nr_pools++;
return 0;
out_fail:
- fprintf(stderr, "smalloc: failed adding pool\n");
+ log_err("smalloc: failed adding pool\n");
if (pool->map)
munmap(pool->map, pool->mmap_size);
return 1;
void sinit(void)
{
- int ret;
+ int i, ret;
+
+ for (i = 0; i < MAX_POOLS; i++) {
+ ret = add_pool(&mp[i], INITIAL_SIZE);
+ if (ret)
+ break;
+ }
- lock = fio_rwlock_init();
- ret = add_pool(&mp[0], INITIAL_SIZE);
- assert(!ret);
+ /*
+ * If we added at least one pool, we should be OK for most
+ * cases.
+ */
+ assert(i);
}
static void cleanup_pool(struct pool *pool)
for (i = 0; i < nr_pools; i++)
cleanup_pool(&mp[i]);
-
- if (lock)
- fio_rwlock_remove(lock);
}
#ifdef SMALLOC_REDZONE
unsigned int *postred = postred_ptr(hdr);
if (hdr->prered != SMALLOC_PRE_RED) {
- fprintf(stderr, "smalloc pre redzone destroyed!\n");
- fprintf(stderr, " ptr=%p, prered=%x, expected %x\n",
+ log_err("smalloc pre redzone destroyed!\n"
+ " ptr=%p, prered=%x, expected %x\n",
hdr, hdr->prered, SMALLOC_PRE_RED);
assert(0);
}
if (*postred != SMALLOC_POST_RED) {
- fprintf(stderr, "smalloc post redzone destroyed!\n");
- fprintf(stderr, " ptr=%p, postred=%x, expected %x\n",
+ log_err("smalloc post redzone destroyed!\n"
+ " ptr=%p, postred=%x, expected %x\n",
hdr, *postred, SMALLOC_POST_RED);
assert(0);
}
i = offset / SMALLOC_BPL;
idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;
- pool_lock(pool);
+ fio_mutex_down(pool->lock);
clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
if (i < pool->next_non_full)
pool->next_non_full = i;
pool->free_blocks += size_to_blocks(hdr->size);
- pool_unlock(pool);
+ fio_mutex_up(pool->lock);
}
void sfree(void *ptr)
if (!ptr)
return;
- global_read_lock();
-
for (i = 0; i < nr_pools; i++) {
if (ptr_valid(&mp[i], ptr)) {
pool = &mp[i];
}
}
- global_read_unlock();
+ if (pool) {
+ sfree_pool(pool, ptr);
+ return;
+ }
- assert(pool);
- sfree_pool(pool, ptr);
+ log_err("smalloc: ptr %p not from smalloc pool\n", ptr);
}
static void *__smalloc_pool(struct pool *pool, size_t size)
unsigned int last_idx;
void *ret = NULL;
- pool_lock(pool);
+ fio_mutex_down(pool->lock);
nr_blocks = size_to_blocks(size);
if (nr_blocks > pool->free_blocks)
ret = pool->map + offset;
}
fail:
- pool_unlock(pool);
+ fio_mutex_up(pool->lock);
return ret;
}
/*
 * Allocate 'size' bytes from the shared memory pools. Scanning starts at
 * the pool that satisfied the previous allocation ('last_pool') and wraps
 * around once to the pools before it. Returns NULL if no pool has room.
 */
void *smalloc(size_t size)
{
	unsigned int i, end_pool;

	/* block sizes are tracked as unsigned int; reject truncating sizes */
	if (size != (unsigned int) size)
		return NULL;

	i = last_pool;
	end_pool = nr_pools;

	do {
		for (; i < end_pool; i++) {
			void *ptr = smalloc_pool(&mp[i], size);

			if (ptr) {
				/*
				 * NOTE(review): last_pool is updated without a
				 * lock — usable only as a scan hint; confirm
				 * the race is intended/benign.
				 */
				last_pool = i;
				return ptr;
			}
		}
		if (last_pool) {
			/* wrap: rescan the pools before the starting hint */
			end_pool = last_pool;
			last_pool = i = 0;
			continue;
		}

		break;
	} while (1);

	return NULL;
}
/*
 * calloc()-style allocation from the shared memory pools: nmemb elements
 * of 'size' bytes each, zero-initialized.
 *
 * Returns NULL if nmemb * size would overflow size_t, or if no pool can
 * satisfy the request.
 */
void *scalloc(size_t nmemb, size_t size)
{
	void *ptr;

	/* guard the multiplication against overflow, as calloc() must */
	if (size && nmemb > (size_t) -1 / size)
		return NULL;

	ptr = smalloc(nmemb * size);
	if (ptr) {
		/*
		 * Pool memory is not pre-zeroed (the mmap-time memset was
		 * removed), so honor the calloc contract explicitly.
		 */
		memset(ptr, 0, nmemb * size);
	}

	return ptr;
}
+
/*
 * Duplicate a NUL-terminated string into shared pool memory.
 *
 * Returns the copy, or NULL if pool allocation fails. The caller frees
 * the result with sfree().
 */
char *smalloc_strdup(const char *str)
{
	size_t len = strlen(str) + 1;	/* include the terminating NUL */
	char *ptr = smalloc(len);

	/* memcpy with the cached length avoids strcpy() re-scanning str */
	if (ptr)
		memcpy(ptr, str, len);

	return ptr;
}