* that can be shared across processes and threads
*/
#include <sys/mman.h>
-#include <stdio.h>
-#include <stdlib.h>
#include <assert.h>
#include <string.h>
-#include <unistd.h>
-#include <inttypes.h>
-#include <sys/types.h>
-#include <limits.h>
-#include <fcntl.h>
-
-#include "mutex.h"
-#include "arch/arch.h"
+#ifdef CONFIG_VALGRIND_DEV
+#include <valgrind/valgrind.h>
+#else
+#define RUNNING_ON_VALGRIND 0
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, size, rzB, is_zeroed) do { } while (0)
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do { } while (0)
+#endif
+
+#include "fio.h"
+#include "fio_sem.h"
#include "os/os.h"
#include "smalloc.h"
#include "log.h"
#endif
/*
 * One allocation pool: a bitmap-tracked arena of fixed-size blocks
 * living in an mmap()ed region so it can be shared across processes
 * and threads.  The lock is a fio_sem (process-shared semaphore),
 * not a pthread mutex.
 */
struct pool {
	struct fio_sem *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	size_t free_blocks;			/* free blocks */
	size_t mmap_size;			/* total size handed to mmap/munmap */
};
/*
 * Size of the redzone accounted to Valgrind for each allocation
 * (passed as the rzB argument of the MALLOCLIKE/FREELIKE client
 * requests).  Zero when redzone checking is compiled out.
 */
#ifdef SMALLOC_REDZONE
#define REDZONE_SIZE	sizeof(unsigned int)
#else
#define REDZONE_SIZE	0
#endif

struct block_hdr {
size_t size;
#ifdef SMALLOC_REDZONE
goto out_fail;
pool->map = ptr;
- pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);
+ pool->bitmap = (unsigned int *)((char *) ptr + (pool->nr_blocks * SMALLOC_BPL));
memset(pool->bitmap, 0, bitmap_blocks * sizeof(unsigned int));
- pool->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
+ pool->lock = fio_sem_init(FIO_SEM_UNLOCKED);
if (!pool->lock)
goto out_fail;
munmap(pool->map, pool->mmap_size);
if (pool->lock)
- fio_mutex_remove(pool->lock);
+ fio_sem_remove(pool->lock);
}
void scleanup(void)
uintptr_t ptr;
ptr = (uintptr_t) hdr + hdr->size - sizeof(unsigned int);
- ptr = (ptr + int_mask) & ~int_mask;
+ ptr = (uintptr_t) PTR_ALIGN(ptr, int_mask);
return (void *) ptr;
}
{
unsigned int *postred = postred_ptr(hdr);
+ /* Let Valgrind fill the red zones. */
+ if (RUNNING_ON_VALGRIND)
+ return;
+
hdr->prered = SMALLOC_PRE_RED;
*postred = SMALLOC_POST_RED;
}
{
unsigned int *postred = postred_ptr(hdr);
+ /* Let Valgrind check the red zones. */
+ if (RUNNING_ON_VALGRIND)
+ return;
+
if (hdr->prered != SMALLOC_PRE_RED) {
log_err("smalloc pre redzone destroyed!\n"
" ptr=%p, prered=%x, expected %x\n",
i = offset / SMALLOC_BPL;
idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;
- fio_mutex_down(pool->lock);
+ fio_sem_down(pool->lock);
clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
if (i < pool->next_non_full)
pool->next_non_full = i;
pool->free_blocks += size_to_blocks(hdr->size);
- fio_mutex_up(pool->lock);
+ fio_sem_up(pool->lock);
}
void sfree(void *ptr)
}
if (pool) {
+ VALGRIND_FREELIKE_BLOCK(ptr, REDZONE_SIZE);
sfree_pool(pool, ptr);
return;
}
unsigned int last_idx;
void *ret = NULL;
- fio_mutex_down(pool->lock);
+ fio_sem_down(pool->lock);
nr_blocks = size_to_blocks(size);
if (nr_blocks > pool->free_blocks)
ret = pool->map + offset;
}
fail:
- fio_mutex_up(pool->lock);
+ fio_sem_up(pool->lock);
return ret;
}
return ptr;
}
-void *smalloc(size_t size)
+static void *__smalloc(size_t size, bool is_zeroed)
{
unsigned int i, end_pool;
if (ptr) {
last_pool = i;
+ VALGRIND_MALLOCLIKE_BLOCK(ptr, size,
+ REDZONE_SIZE,
+ is_zeroed);
return ptr;
}
}
return NULL;
}
/*
 * Allocate 'size' bytes from the shared pools.  The returned memory is
 * not reported to Valgrind as zeroed (is_zeroed = false); use scalloc()
 * for zero-initialized semantics.
 */
void *smalloc(size_t size)
{
	return __smalloc(size, false);
}
+
void *scalloc(size_t nmemb, size_t size)
{
- return smalloc(nmemb * size);
+ return __smalloc(nmemb * size, true);
}
char *smalloc_strdup(const char *str)