#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
-#include <sys/shm.h>
#include <sys/mman.h>
#include "fio.h"
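+/*
+ * Not every platform ships <sys/shm.h>; such builds define FIO_NO_HAVE_SHM_H
+ * so the header is skipped (SysV shm allocation itself is gated by
+ * CONFIG_NO_SHM further down).
+ */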
+#ifndef FIO_NO_HAVE_SHM_H
+#include <sys/shm.h>
+#endif
-static void *pinned_mem;
-
-void fio_unpin_memory(void)
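+/* each thread tracks its own pinned region in td->pinned_mem */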
+void fio_unpin_memory(struct thread_data *td)
{
- if (pinned_mem) {
- dprint(FD_MEM, "unpinning %llu bytes\n", mlock_size);
- if (munlock(pinned_mem, mlock_size) < 0)
+ if (td->pinned_mem) {
+ dprint(FD_MEM, "unpinning %llu bytes\n", td->o.lockmem);
+ if (munlock(td->pinned_mem, td->o.lockmem) < 0)
perror("munlock");
- munmap(pinned_mem, mlock_size);
- pinned_mem = NULL;
+ munmap(td->pinned_mem, td->o.lockmem);
+ td->pinned_mem = NULL;
}
}
-int fio_pin_memory(void)
+int fio_pin_memory(struct thread_data *td)
{
unsigned long long phys_mem;
- if (!mlock_size)
+ if (!td->o.lockmem)
return 0;
- dprint(FD_MEM, "pinning %llu bytes\n", mlock_size);
+ dprint(FD_MEM, "pinning %llu bytes\n", td->o.lockmem);
/*
* Don't allow mlock of more than real_mem-128MB
*/
phys_mem = os_phys_mem();
if (phys_mem) {
- if ((mlock_size + 128 * 1024 * 1024) > phys_mem) {
- mlock_size = phys_mem - 128 * 1024 * 1024;
+ if ((td->o.lockmem + 128 * 1024 * 1024) > phys_mem) {
+ td->o.lockmem = phys_mem - 128 * 1024 * 1024;
log_info("fio: limiting mlocked memory to %lluMB\n",
- mlock_size >> 20);
+ td->o.lockmem >> 20);
}
}
- pinned_mem = mmap(NULL, mlock_size, PROT_READ | PROT_WRITE,
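+	/* back the pinned region with an anonymous private mapping, then lock it below */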
+ td->pinned_mem = mmap(NULL, td->o.lockmem, PROT_READ | PROT_WRITE,
MAP_PRIVATE | OS_MAP_ANON, -1, 0);
- if (pinned_mem == MAP_FAILED) {
+ if (td->pinned_mem == MAP_FAILED) {
perror("malloc locked mem");
- pinned_mem = NULL;
+ td->pinned_mem = NULL;
return 1;
}
- if (mlock(pinned_mem, mlock_size) < 0) {
+ if (mlock(td->pinned_mem, td->o.lockmem) < 0) {
perror("mlock");
- munmap(pinned_mem, mlock_size);
- pinned_mem = NULL;
+ munmap(td->pinned_mem, td->o.lockmem);
+ td->pinned_mem = NULL;
return 1;
}
static int alloc_mem_shm(struct thread_data *td, unsigned int total_mem)
{
- int flags = IPC_CREAT | SHM_R | SHM_W;
+#ifndef CONFIG_NO_SHM
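+	/* create the segment owner read/write; S_IRUSR/S_IWUSR match the legacy SHM_R/SHM_W bits */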
+ int flags = IPC_CREAT | S_IRUSR | S_IWUSR;
if (td->o.mem_type == MEM_SHMHUGE) {
unsigned long mask = td->o.hugepage_size - 1;
" support huge pages.\n");
} else if (errno == ENOMEM) {
log_err("fio: no huge pages available, do you"
- " need to alocate some? See HOWTO.\n");
+ " need to allocate some? See HOWTO.\n");
}
}
}
return 0;
+#else
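+	/* built without SysV shm support: report the allocation as failed */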
+ log_err("fio: shm not supported\n");
+ return 1;
+#endif
}
static void free_mem_shm(struct thread_data *td)
{
+#ifndef CONFIG_NO_SHM
struct shmid_ds sbuf;
dprint(FD_MEM, "shmdt/ctl %d %p\n", td->shm_id, td->orig_buffer);
shmdt(td->orig_buffer);
shmctl(td->shm_id, IPC_RMID, &sbuf);
+#endif
}
-static int alloc_mem_mmap(struct thread_data *td, unsigned int total_mem)
+static int alloc_mem_mmap(struct thread_data *td, size_t total_mem)
{
-	int flags = MAP_PRIVATE;
+	int flags = 0;

-	td->mmapfd = 1;
+	td->mmapfd = -1;

+ if (td->o.mem_type == MEM_MMAPHUGE) {
+ unsigned long mask = td->o.hugepage_size - 1;
+
+ /* TODO: make sure the file is a real hugetlbfs file */
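+		/* no hugetlbfs file given, so request huge pages on the anonymous mapping */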
+ if (!td->o.mmapfile)
+ flags |= MAP_HUGETLB;
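+		/* the mapping length must be a whole number of huge pages, round it up */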
+ total_mem = (total_mem + mask) & ~mask;
+ }
- if (td->mmapfile) {
- td->mmapfd = open(td->mmapfile, O_RDWR|O_CREAT, 0644);
+ if (td->o.mmapfile) {
+ td->mmapfd = open(td->o.mmapfile, O_RDWR|O_CREAT, 0644);
if (td->mmapfd < 0) {
td_verror(td, errno, "open mmap file");
td->orig_buffer = NULL;
return 1;
}
- if (ftruncate(td->mmapfd, total_mem) < 0) {
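+		/*
+		 * Only size the backing file for a plain mmap; hugetlbfs and
+		 * shared mappings use the file as-is.
+		 */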
+ if (td->o.mem_type != MEM_MMAPHUGE &&
+ td->o.mem_type != MEM_MMAPSHARED &&
+ ftruncate(td->mmapfd, total_mem) < 0) {
td_verror(td, errno, "truncate mmap file");
td->orig_buffer = NULL;
return 1;
}
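+		/*
+		 * File-backed hugepage and "mmapshared" buffers are shared
+		 * mappings; a plain file-backed mmap stays private.
+		 */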
+ if (td->o.mem_type == MEM_MMAPHUGE ||
+ td->o.mem_type == MEM_MMAPSHARED)
+ flags |= MAP_SHARED;
+ else
+ flags |= MAP_PRIVATE;
} else
- flags |= OS_MAP_ANON;
+ flags |= OS_MAP_ANON | MAP_PRIVATE;
td->orig_buffer = mmap(NULL, total_mem, PROT_READ | PROT_WRITE, flags,
td->mmapfd, 0);
- dprint(FD_MEM, "mmap %u/%d %p\n", total_mem, td->mmapfd,
- td->orig_buffer);
+ dprint(FD_MEM, "mmap %llu/%d %p\n", (unsigned long long) total_mem,
+ td->mmapfd, td->orig_buffer);
if (td->orig_buffer == MAP_FAILED) {
td_verror(td, errno, "mmap");
td->orig_buffer = NULL;
- if (td->mmapfd) {
+ if (td->mmapfd != 1 && td->mmapfd != -1) {
close(td->mmapfd);
- unlink(td->mmapfile);
+ if (td->o.mmapfile)
+ unlink(td->o.mmapfile);
}
		return 1;
	}

	return 0;
}
-static void free_mem_mmap(struct thread_data *td, unsigned int total_mem)
+static void free_mem_mmap(struct thread_data *td, size_t total_mem)
{
- dprint(FD_MEM, "munmap %u %p\n", total_mem, td->orig_buffer);
+ dprint(FD_MEM, "munmap %llu %p\n", (unsigned long long) total_mem,
+ td->orig_buffer);
munmap(td->orig_buffer, td->orig_buffer_size);
- if (td->mmapfile) {
- close(td->mmapfd);
- unlink(td->mmapfile);
- free(td->mmapfile);
+ if (td->o.mmapfile) {
+ if (td->mmapfd != -1)
+ close(td->mmapfd);
+ unlink(td->o.mmapfile);
+ free(td->o.mmapfile);
}
}
-static int alloc_mem_malloc(struct thread_data *td, unsigned int total_mem)
+static int alloc_mem_malloc(struct thread_data *td, size_t total_mem)
{
td->orig_buffer = malloc(total_mem);
- dprint(FD_MEM, "malloc %u %p\n", total_mem, td->orig_buffer);
+ dprint(FD_MEM, "malloc %llu %p\n", (unsigned long long) total_mem,
+ td->orig_buffer);
return td->orig_buffer == NULL;
}
}
/*
- * Setup the buffer area we need for io.
+ * Set up the buffer area we need for io.
*/
int allocate_io_mem(struct thread_data *td)
{
- unsigned int total_mem;
+ size_t total_mem;
int ret = 0;
- if (td->io_ops->flags & FIO_NOIO)
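+	/* engines flagged FIO_NOIO never transfer data, so no buffer is needed */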
+ if (td_ioengine_flagged(td, FIO_NOIO))
return 0;
total_mem = td->orig_buffer_size;
- if (td->o.odirect || td->o.mem_align ||
- (td->io_ops->flags & FIO_MEMALIGN)) {
+ if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
+ td_ioengine_flagged(td, FIO_MEMALIGN)) {
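+		/* reserve extra room so the I/O buffer can be aligned as required */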
total_mem += page_mask;
if (td->o.mem_align && td->o.mem_align > page_size)
total_mem += td->o.mem_align - page_size;
}
- if (td->o.mem_type == MEM_MALLOC)
+ dprint(FD_MEM, "Alloc %llu for buffers\n", (unsigned long long) total_mem);
+
+ /*
+ * If the IO engine has hooks to allocate/free memory, use those. But
+ * error out if the user explicitly asked for something else.
+ */
+ if (td->io_ops->iomem_alloc) {
+ if (fio_option_is_set(&td->o, mem_type)) {
+ log_err("fio: option 'mem/iomem' conflicts with specified IO engine\n");
+ ret = 1;
+ } else
+ ret = td->io_ops->iomem_alloc(td, total_mem);
+ } else if (td->o.mem_type == MEM_MALLOC)
ret = alloc_mem_malloc(td, total_mem);
else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE)
ret = alloc_mem_shm(td, total_mem);
- else if (td->o.mem_type == MEM_MMAP || td->o.mem_type == MEM_MMAPHUGE)
+ else if (td->o.mem_type == MEM_MMAP || td->o.mem_type == MEM_MMAPHUGE ||
+ td->o.mem_type == MEM_MMAPSHARED)
ret = alloc_mem_mmap(td, total_mem);
else {
log_err("fio: bad mem type: %d\n", td->o.mem_type);
unsigned int total_mem;
total_mem = td->orig_buffer_size;
- if (td->o.odirect)
+ if (td->o.odirect || td->o.oatomic)
total_mem += page_mask;
- if (td->o.mem_type == MEM_MALLOC)
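+	/* memory that came from the engine's iomem_alloc hook goes back through iomem_free */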
+ if (td->io_ops->iomem_alloc) {
+ if (td->io_ops->iomem_free)
+ td->io_ops->iomem_free(td);
+ } else if (td->o.mem_type == MEM_MALLOC)
free_mem_malloc(td);
else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE)
free_mem_shm(td);
- else if (td->o.mem_type == MEM_MMAP || td->o.mem_type == MEM_MMAPHUGE)
+ else if (td->o.mem_type == MEM_MMAP || td->o.mem_type == MEM_MMAPHUGE ||
+ td->o.mem_type == MEM_MMAPSHARED)
free_mem_mmap(td, total_mem);
else
log_err("Bad memory type %u\n", td->o.mem_type);