diff --git a/engines/libpmem.c b/engines/libpmem.c
index 3ba3bfe2fe8d8ce1c3fa01ef9637b31900b15d05..99c7b50ddc23a1f188d1a30e0fb249a38afbdfae 100644
--- a/engines/libpmem.c
+++ b/engines/libpmem.c
@@ -1,5 +1,5 @@
 /*
- * libpmem: IO engine that uses NVML libpmem to read and write data
+ * libpmem: IO engine that uses PMDK libpmem to read and write data
  *
  * Copyright (C) 2017 Nippon Telegraph and Telephone Corporation.
  *
@@ -81,10 +81,9 @@ struct fio_libpmem_data {
 #define PROCMAXLEN 2048 /* maximum expected line length in /proc files */
 #define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))
 
-static int Mmap_no_random;
+static bool Mmap_no_random;
 static void *Mmap_hint;
 static unsigned long long Mmap_align;
-static unsigned long long Pagesize = 0;
 
 /*
  * util_map_hint_align -- choose the desired mapping alignment
@@ -92,23 +91,11 @@ static unsigned long long Pagesize = 0;
  * Use 2MB/1GB page alignment only if the mapping length is at least
  * twice as big as the page size.
  */
-static inline size_t
-util_map_hint_align(size_t len, size_t req_align)
+static inline size_t util_map_hint_align(size_t len, size_t req_align)
 {
-       size_t align = 0;
+       size_t align = Mmap_align;
 
        dprint(FD_IO, "DEBUG util_map_hint_align\n" );
-#ifndef WIN32
-       Mmap_align = Pagesize;
-#else
-       if (Mmap_align == 0) {
-               SYSTEM_INFO si;
-               GetSystemInfo(&si);
-               Mmap_align = si.dwAllocationGranularity;
-       }
-#endif
-
-       align = Mmap_align;
 
        if (req_align)
                align = req_align;
@@ -144,7 +131,7 @@ static const char *sscanf_os = "%p-%p";
  * mappings.  It is not an error if mmap() ignores the hint and chooses
  * different address.
  */
-static char * util_map_hint_unused(void *minaddr, size_t len, size_t align)
+static char *util_map_hint_unused(void *minaddr, size_t len, size_t align)
 {
        char *lo = NULL;        /* beginning of current range in maps file */
        char *hi = NULL;        /* end of current range in maps file */
@@ -160,11 +147,8 @@ static char * util_map_hint_unused(void *minaddr, size_t len, size_t align)
        dprint(FD_IO, "DEBUG util_map_hint_unused\n");
        assert(align > 0);
 
-       /* XXX - replace sysconf() with util_get_sys_xxx() */
-       Pagesize = (unsigned long) sysconf(_SC_PAGESIZE);
-
        if (raddr == NULL)
-               raddr += Pagesize;
+               raddr += page_size;
 
        raddr = (char *)roundup((uintptr_t)raddr, align);
 
@@ -192,7 +176,8 @@ static char * util_map_hint_unused(void *minaddr, size_t len, size_t align)
        dprint(FD_IO, "end of address space reached");
        return MAP_FAILED;
 #else
-       if ((fp = fopen(OS_MAPFILE, "r")) == NULL) {
+       fp = fopen(OS_MAPFILE, "r");
+       if (!fp) {
                log_err("!%s\n", OS_MAPFILE);
                return MAP_FAILED;
        }
@@ -265,7 +250,7 @@ static char * util_map_hint_unused(void *minaddr, size_t len, size_t align)
  *   the first unused, properly aligned region of given size, above the
  *   specified address.
  */
-static char * util_map_hint(size_t len, size_t req_align)
+static char *util_map_hint(size_t len, size_t req_align)
 {
        char *addr;
        size_t align = 0;
@@ -289,7 +274,7 @@ static char * util_map_hint(size_t len, size_t req_align)
                        dprint(FD_IO, "Invalid PMEM_MMAP_HINT\n");
                } else {
                        Mmap_hint = (void *)val;
-                       Mmap_no_random = 1;
+                       Mmap_no_random = true;
                        dprint(FD_IO, "PMEM_MMAP_HINT set to %p\n", Mmap_hint);
                }
        }
@@ -415,7 +400,7 @@ static int fio_libpmem_prep_full(struct thread_data *td, struct io_u *io_u)
                        f->io_size, io_u->offset);
 
        if (io_u->offset != (size_t) io_u->offset ||
-                       f->io_size != (size_t) f->io_size) {
+           f->io_size != (size_t) f->io_size) {
                fio_file_set_partial_mmap(f);
                return EINVAL;
        }
@@ -439,14 +424,14 @@ static int fio_libpmem_prep(struct thread_data *td, struct io_u *io_u)
        /*
         * It fits within existing mapping, use it
         */
-       dprint(FD_IO," io_u->offset %lld : fdd->libpmem_off %ld : "
-                       "io_u->buflen %ld : fdd->libpmem_sz %ld\n",
-                       io_u->offset, fdd->libpmem_off,
-                       io_u->buflen, fdd->libpmem_sz);
+       dprint(FD_IO," io_u->offset %llu : fdd->libpmem_off %llu : "
+                       "io_u->buflen %llu : fdd->libpmem_sz %llu\n",
+                       io_u->offset, (unsigned long long) fdd->libpmem_off,
+                       io_u->buflen, (unsigned long long) fdd->libpmem_sz);
 
        if (io_u->offset >= fdd->libpmem_off &&
-                       io_u->offset + io_u->buflen <
-                       fdd->libpmem_off + fdd->libpmem_sz)
+           (io_u->offset + io_u->buflen <=
+            fdd->libpmem_off + fdd->libpmem_sz))
                goto done;
 
        /*
@@ -468,11 +453,12 @@ static int fio_libpmem_prep(struct thread_data *td, struct io_u *io_u)
 
 done:
        io_u->mmap_data = fdd->libpmem_ptr + io_u->offset - fdd->libpmem_off
-               - f->file_offset;
+                               - f->file_offset;
        return 0;
 }
 
-static int fio_libpmem_queue(struct thread_data *td, struct io_u *io_u)
+static enum fio_q_status fio_libpmem_queue(struct thread_data *td,
+                                          struct io_u *io_u)
 {
        fio_ro_check(td, io_u);
        io_u->error = 0;
@@ -480,30 +466,30 @@ static int fio_libpmem_queue(struct thread_data *td, struct io_u *io_u)
        dprint(FD_IO, "DEBUG fio_libpmem_queue\n");
 
        switch (io_u->ddir) {
-               case DDIR_READ:
-                       memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
-                       break;
-               case DDIR_WRITE:
-                       dprint(FD_IO, "DEBUG mmap_data=%p, xfer_buf=%p\n",
-                                       io_u->mmap_data, io_u->xfer_buf );
-                       dprint(FD_IO,"td->o.odirect %d \n",td->o.odirect);
-                       if(td->o.odirect == 1){
-                               pmem_memcpy_persist(io_u->mmap_data,
+       case DDIR_READ:
+               memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
+               break;
+       case DDIR_WRITE:
+               dprint(FD_IO, "DEBUG mmap_data=%p, xfer_buf=%p\n",
+                               io_u->mmap_data, io_u->xfer_buf );
+               dprint(FD_IO,"td->o.odirect %d \n",td->o.odirect);
+               if (td->o.odirect) {
+                       pmem_memcpy_persist(io_u->mmap_data,
                                                io_u->xfer_buf,
                                                io_u->xfer_buflen);
-                       } else {
-                               pmem_memcpy_nodrain(io_u->mmap_data,
+               } else {
+                       pmem_memcpy_nodrain(io_u->mmap_data,
                                                io_u->xfer_buf,
                                                io_u->xfer_buflen);
-                       }
-                       break;
-               case DDIR_SYNC:
-               case DDIR_DATASYNC:
-               case DDIR_SYNC_FILE_RANGE:
-                       break;
-               default:
-                       io_u->error = EINVAL;
-                       break;
+               }
+               break;
+       case DDIR_SYNC:
+       case DDIR_DATASYNC:
+       case DDIR_SYNC_FILE_RANGE:
+               break;
+       default:
+               io_u->error = EINVAL;
+               break;
        }
 
        return FIO_Q_COMPLETED;
@@ -513,12 +499,12 @@ static int fio_libpmem_init(struct thread_data *td)
 {
        struct thread_options *o = &td->o;
 
-       dprint(FD_IO,"o->rw_min_bs %d \n o->fsync_blocks %d \n o->fdatasync_blocks %d \n",
+       dprint(FD_IO,"o->rw_min_bs %llu \n o->fsync_blocks %d \n o->fdatasync_blocks %d \n",
                        o->rw_min_bs,o->fsync_blocks,o->fdatasync_blocks);
        dprint(FD_IO, "DEBUG fio_libpmem_init\n");
 
        if ((o->rw_min_bs & page_mask) &&
-                       (o->fsync_blocks || o->fdatasync_blocks)) {
+           (o->fsync_blocks || o->fdatasync_blocks)) {
                log_err("libpmem: mmap options dictate a minimum block size of "
                                "%llu bytes\n", (unsigned long long) page_size);
                return 1;
@@ -560,7 +546,7 @@ static int fio_libpmem_close_file(struct thread_data *td, struct fio_file *f)
        dprint(FD_IO,"DEBUG fio_libpmem_close_file\n");
        dprint(FD_IO,"td->o.odirect %d \n",td->o.odirect);
 
-       if (td->o.odirect != 1) {
+       if (!td->o.odirect) {
                dprint(FD_IO,"pmem_drain\n");
                pmem_drain();
        }
@@ -573,19 +559,30 @@ static int fio_libpmem_close_file(struct thread_data *td, struct fio_file *f)
 }
 
 static struct ioengine_ops ioengine = {
-       .name           = "libpmem",
-       .version        = FIO_IOOPS_VERSION,
-       .init           = fio_libpmem_init,
-       .prep           = fio_libpmem_prep,
-       .queue          = fio_libpmem_queue,
-       .open_file      = fio_libpmem_open_file,
-       .close_file     = fio_libpmem_close_file,
-       .get_file_size  = generic_get_file_size,
-       .flags          = FIO_SYNCIO |FIO_NOEXTEND,
+       .name           = "libpmem",
+       .version        = FIO_IOOPS_VERSION,
+       .init           = fio_libpmem_init,
+       .prep           = fio_libpmem_prep,
+       .queue          = fio_libpmem_queue,
+       .open_file      = fio_libpmem_open_file,
+       .close_file     = fio_libpmem_close_file,
+       .get_file_size  = generic_get_file_size,
+       .flags          = FIO_SYNCIO |FIO_NOEXTEND,
 };
 
 static void fio_init fio_libpmem_register(void)
 {
+#ifndef WIN32
+       Mmap_align = page_size;
+#else
+       if (Mmap_align == 0) {
+               SYSTEM_INFO si;
+
+               GetSystemInfo(&si);
+               Mmap_align = si.dwAllocationGranularity;
+       }
+#endif
+
        register_ioengine(&ioengine);
 }
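
A minimal, hedged sketch of the write-path pattern the engine follows after this change: pmem_memcpy_persist() when fio runs with direct=1, and pmem_memcpy_nodrain() with a single deferred pmem_drain() otherwise (the engine defers the drain to fio_libpmem_close_file). This is not part of the commit; it only assumes PMDK's libpmem is installed, and the file path is a placeholder for a DAX-mounted file.

/* sketch.c -- build with: cc sketch.c -lpmem */
#include <libpmem.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "/mnt/pmem/fio-example"; /* placeholder DAX-backed file */
	size_t mapped_len;
	int is_pmem;
	char buf[4096];
	int sync_each_write = 1; /* analogue of fio's direct=1 */

	/* Create and map a small file on persistent memory. */
	void *dst = pmem_map_file(path, sizeof(buf), PMEM_FILE_CREATE,
				  0644, &mapped_len, &is_pmem);
	if (!dst) {
		perror("pmem_map_file");
		return 1;
	}

	memset(buf, 'a', sizeof(buf));

	if (sync_each_write) {
		/* direct=1 analogue: copy and make the data durable now. */
		pmem_memcpy_persist(dst, buf, sizeof(buf));
	} else {
		/*
		 * direct=0 analogue: copy without draining; the engine issues
		 * one pmem_drain() at file close instead of per write.
		 */
		pmem_memcpy_nodrain(dst, buf, sizeof(buf));
		pmem_drain();
	}

	pmem_unmap(dst, mapped_len);
	return 0;
}

In fio itself the same two behaviours are selected from the job file with ioengine=libpmem and direct=0 or direct=1.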