/*
* libpmem engine
*
- * IO engine that uses libpmem to write data (and memcpy to read)
+ * IO engine that uses libpmem (part of the PMDK collection) to write data
+ * and libc's memcpy to read. It requires PMDK >= 1.5.
*
* To use:
* ioengine=libpmem
* mkdir /mnt/pmem0
* mount -o dax /dev/pmem0 /mnt/pmem0
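+ * directory=/mnt/pmem0 (or 'filename=' pointing into the DAX mount)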
*
- * See examples/libpmem.fio for more.
- *
- *
- * libpmem.so
- * By default, the libpmem engine will let the system find the libpmem.so
- * that it uses. You can use an alternative libpmem by setting the
- * FIO_PMEM_LIB environment variable to the full path to the desired
- * libpmem.so. This engine requires PMDK >= 1.5.
+ * See examples/libpmem.fio for a complete usage example.
*/
#include <stdio.h>
-#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/sysmacros.h>
-#include <libgen.h>
#include <libpmem.h>
#include "../fio.h"
{
struct thread_options *o = &td->o;
- dprint(FD_IO,"o->rw_min_bs %llu \n o->fsync_blocks %u \n o->fdatasync_blocks %u \n",
- o->rw_min_bs,o->fsync_blocks,o->fdatasync_blocks);
+ dprint(FD_IO, "o->rw_min_bs %llu\n o->fsync_blocks %u\n o->fdatasync_blocks %u\n",
+ o->rw_min_bs, o->fsync_blocks, o->fdatasync_blocks);
dprint(FD_IO, "DEBUG fio_libpmem_init\n");
if ((o->rw_min_bs & page_mask) &&
}
/*
- * This is the pmem_map_file execution function
+ * This is the pmem_map_file execution function, a helper for the
+ * fio_libpmem_open_file function.
*/
static int fio_libpmem_file(struct thread_data *td, struct fio_file *f,
size_t length, off_t off)
{
struct fio_libpmem_data *fdd;
- dprint(FD_IO,"DEBUG fio_libpmem_open_file\n");
- dprint(FD_IO,"f->io_size=%ld \n",f->io_size);
- dprint(FD_IO,"td->o.size=%lld \n",td->o.size);
- dprint(FD_IO,"td->o.iodepth=%d\n",td->o.iodepth);
- dprint(FD_IO,"td->o.iodepth_batch=%d \n",td->o.iodepth_batch);
+ dprint(FD_IO, "DEBUG fio_libpmem_open_file\n");
+ dprint(FD_IO, "f->io_size=%ld\n", f->io_size);
+ dprint(FD_IO, "td->o.size=%lld\n", td->o.size);
+ dprint(FD_IO, "td->o.iodepth=%d\n", td->o.iodepth);
+ dprint(FD_IO, "td->o.iodepth_batch=%d\n", td->o.iodepth_batch);
if (fio_file_open(f))
td_io_close_file(td, f);
struct fio_file *f = io_u->file;
struct fio_libpmem_data *fdd = FILE_ENG_DATA(f);
- dprint(FD_IO, "DEBUG fio_libpmem_prep\n" );
- dprint(FD_IO," io_u->offset %llu : fdd->libpmem_off %ld : "
+ dprint(FD_IO, "DEBUG fio_libpmem_prep\n");
+ dprint(FD_IO, "io_u->offset %llu : fdd->libpmem_off %ld : "
"io_u->buflen %llu : fdd->libpmem_sz %ld\n",
io_u->offset, fdd->libpmem_off,
io_u->buflen, fdd->libpmem_sz);
io_u->error = 0;
dprint(FD_IO, "DEBUG fio_libpmem_queue\n");
- dprint(FD_IO,"td->o.odirect %d td->o.sync_io %d \n",td->o.odirect, td->o.sync_io);
- /* map both O_SYNC / DSYNC to not using NODRAIN */
+ dprint(FD_IO, "td->o.odirect %d td->o.sync_io %d\n",
+ td->o.odirect, td->o.sync_io);
+ /* map both O_SYNC / DSYNC to not use NODRAIN */
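+ /* direct=1 selects non-temporal stores (PMEM_F_MEM_NONTEMPORAL), which
+  * bypass the CPU cache; otherwise cached PMEM_F_MEM_TEMPORAL stores are
+  * used */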
flags = td->o.sync_io ? 0 : PMEM_F_MEM_NODRAIN;
flags |= td->o.odirect ? PMEM_F_MEM_NONTEMPORAL : PMEM_F_MEM_TEMPORAL;
break;
case DDIR_WRITE:
dprint(FD_IO, "DEBUG mmap_data=%p, xfer_buf=%p\n",
- io_u->mmap_data, io_u->xfer_buf );
+ io_u->mmap_data, io_u->xfer_buf);
pmem_memcpy(io_u->mmap_data,
io_u->xfer_buf,
io_u->xfer_buflen,
struct fio_libpmem_data *fdd = FILE_ENG_DATA(f);
int ret = 0;
- dprint(FD_IO,"DEBUG fio_libpmem_close_file\n");
- dprint(FD_IO,"td->o.odirect %d \n",td->o.odirect);
+ dprint(FD_IO, "DEBUG fio_libpmem_close_file\n");
+ dprint(FD_IO, "td->o.odirect %d\n", td->o.odirect);
if (!td->o.odirect) {
dprint(FD_IO,"pmem_drain\n");
[global]
bs=4k
-size=8g
+size=10g
ioengine=libpmem
norandommap
time_based
numjobs=1
runtime=300
-#
-# In case of 'scramble_buffers=1', the source buffer
-# is rewritten with a random value every write operations.
-#
-# But when 'scramble_buffers=0' is set, the source buffer isn't
-# rewritten. So it will be likely that the source buffer is in CPU
-# cache and it seems to be high performance.
-#
-scramble_buffers=0
-
#
# depends on direct option, flags are set for pmem_memcpy() call:
# direct=1 - PMEM_F_MEM_NONTEMPORAL,
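+# direct=0 - PMEM_F_MEM_TEMPORAL,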
#
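+# sync=1 makes pmem_memcpy() run without PMEM_F_MEM_NODRAIN, so each
+# write is drained to the persistence domain before it completes.
+#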
sync=1
+#
+# With 'scramble_buffers=1', the source buffer is rewritten with a
+# random value on every write operation.
+#
+# With 'scramble_buffers=0', the source buffer isn't rewritten, so it
+# is likely to stay in the CPU cache and write performance may look
+# artificially high.
+#
+scramble_buffers=1
#
-# Setting for fio process's CPU Node and Memory Node
+# CPU node and memory node binding for the fio process.
+# Set the proper nodes below, or run fio under the `numactl` command.
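+# For example: numactl --cpunodebind=0 --membind=0 fio <jobfile>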
#
numa_cpu_nodes=0
numa_mem_policy=bind:0
#
# The libpmem engine does IO to files in a DAX-mounted filesystem.
-# The filesystem should be created on an NVDIMM (e.g /dev/pmem0)
+# The filesystem should be created on a Non-Volatile DIMM (e.g. /dev/pmem0)
# and then mounted with the '-o dax' option. Note that the engine
# accesses the underlying NVDIMM directly, bypassing the kernel block
# layer, so the usual filesystem/disk performance monitoring tools such
# as iostat will not provide useful data.
#
-directory=/mnt/pmem0
+#filename=/mnt/pmem/somefile
+directory=/mnt/pmem
[libpmem-seqwrite]
rw=write
stonewall
-#[libpmem-seqread]
-#rw=read
-#stonewall
+[libpmem-seqread]
+rw=read
+stonewall
#[libpmem-randwrite]
#rw=randwrite