diff --git a/engines/libblkio.c b/engines/libblkio.c
index 6decfd39e49f8111077fece183068235527ac0f7..054aa8001ad1dafc685104e0a65d92d364329d06 100644
--- a/engines/libblkio.c
+++ b/engines/libblkio.c
 #include "../options.h"
 #include "../parse.h"
 
+/* per-process state */
+static struct {
+       pthread_mutex_t mutex;
+       int initted_threads;
+       int initted_hipri_threads;
+       struct blkio *b;
+} proc_state = { PTHREAD_MUTEX_INITIALIZER, 0, 0, NULL };
+
+static void fio_blkio_proc_lock(void) {
+       int ret;
+       ret = pthread_mutex_lock(&proc_state.mutex);
+       assert(ret == 0);
+}
+
+static void fio_blkio_proc_unlock(void) {
+       int ret;
+       ret = pthread_mutex_unlock(&proc_state.mutex);
+       assert(ret == 0);
+}
+
 /* per-thread state */
 struct fio_blkio_data {
-       struct blkio *b;
        struct blkioq *q;
+       int completion_fd; /* may be -1 if not FIO_BLKIO_WAIT_MODE_EVENTFD */
 
        bool has_mem_region; /* whether mem_region is valid */
        struct blkio_mem_region mem_region; /* only if allocated by libblkio */
 
+       struct iovec *iovecs; /* for vectored requests */
        struct blkio_completion *completions;
 };
 
+enum fio_blkio_wait_mode {
+       FIO_BLKIO_WAIT_MODE_BLOCK,
+       FIO_BLKIO_WAIT_MODE_EVENTFD,
+       FIO_BLKIO_WAIT_MODE_LOOP,
+};
+
 struct fio_blkio_options {
        void *pad; /* option fields must not have offset 0 */
 
        char *driver;
+
+       char *path;
        char *pre_connect_props;
+
+       int num_entries;
+       int queue_size;
        char *pre_start_props;
 
        unsigned int hipri;
+       unsigned int vectored;
+       unsigned int write_zeroes_on_trim;
+       enum fio_blkio_wait_mode wait_mode;
+       unsigned int force_enable_completion_eventfd;
 };
 
 static struct fio_option options[] = {
@@ -51,18 +87,49 @@ static struct fio_option options[] = {
                .category = FIO_OPT_C_ENGINE,
                .group  = FIO_OPT_G_LIBBLKIO,
        },
+       {
+               .name   = "libblkio_path",
+               .lname  = "libblkio \"path\" property",
+               .type   = FIO_OPT_STR_STORE,
+               .off1   = offsetof(struct fio_blkio_options, path),
+               .help   = "Value to set the \"path\" property to",
+               .category = FIO_OPT_C_ENGINE,
+               .group  = FIO_OPT_G_LIBBLKIO,
+       },
        {
                .name   = "libblkio_pre_connect_props",
-               .lname  = "Properties to be set before blkio_connect()",
+               .lname  = "Additional properties to be set before blkio_connect()",
                .type   = FIO_OPT_STR_STORE,
                .off1   = offsetof(struct fio_blkio_options, pre_connect_props),
                .help   = "",
                .category = FIO_OPT_C_ENGINE,
                .group  = FIO_OPT_G_LIBBLKIO,
        },
+       {
+               .name   = "libblkio_num_entries",
+               .lname  = "libblkio \"num-entries\" property",
+               .type   = FIO_OPT_INT,
+               .off1   = offsetof(struct fio_blkio_options, num_entries),
+               .help   = "Value to set the \"num-entries\" property to",
+               .minval = 1,
+               .interval = 1,
+               .category = FIO_OPT_C_ENGINE,
+               .group  = FIO_OPT_G_LIBBLKIO,
+       },
+       {
+               .name   = "libblkio_queue_size",
+               .lname  = "libblkio \"queue-size\" property",
+               .type   = FIO_OPT_INT,
+               .off1   = offsetof(struct fio_blkio_options, queue_size),
+               .help   = "Value to set the \"queue-size\" property to",
+               .minval = 1,
+               .interval = 1,
+               .category = FIO_OPT_C_ENGINE,
+               .group  = FIO_OPT_G_LIBBLKIO,
+       },
        {
                .name   = "libblkio_pre_start_props",
-               .lname  = "Properties to be set before blkio_start()",
+               .lname  = "Additional properties to be set before blkio_start()",
                .type   = FIO_OPT_STR_STORE,
                .off1   = offsetof(struct fio_blkio_options, pre_start_props),
                .help   = "",
@@ -78,6 +145,59 @@ static struct fio_option options[] = {
                .category = FIO_OPT_C_ENGINE,
                .group  = FIO_OPT_G_LIBBLKIO,
        },
+       {
+               .name   = "libblkio_vectored",
+               .lname  = "Use blkioq_{readv,writev}()",
+               .type   = FIO_OPT_STR_SET,
+               .off1   = offsetof(struct fio_blkio_options, vectored),
+               .help   = "Use blkioq_{readv,writev}() instead of blkioq_{read,write}()",
+               .category = FIO_OPT_C_ENGINE,
+               .group  = FIO_OPT_G_LIBBLKIO,
+       },
+       {
+               .name   = "libblkio_write_zeroes_on_trim",
+               .lname  = "Use blkioq_write_zeroes() for TRIM",
+               .type   = FIO_OPT_STR_SET,
+               .off1   = offsetof(struct fio_blkio_options,
+                                  write_zeroes_on_trim),
+               .help   = "Use blkioq_write_zeroes() for TRIM instead of blkioq_discard()",
+               .category = FIO_OPT_C_ENGINE,
+               .group  = FIO_OPT_G_LIBBLKIO,
+       },
+       {
+               .name   = "libblkio_wait_mode",
+               .lname  = "How to wait for completions",
+               .type   = FIO_OPT_STR,
+               .off1   = offsetof(struct fio_blkio_options, wait_mode),
+               .help   = "How to wait for completions",
+               .def    = "block",
+               .posval = {
+                         { .ival = "block",
+                           .oval = FIO_BLKIO_WAIT_MODE_BLOCK,
+                           .help = "Blocking blkioq_do_io()",
+                         },
+                         { .ival = "eventfd",
+                           .oval = FIO_BLKIO_WAIT_MODE_EVENTFD,
+                           .help = "Blocking read() on the completion eventfd",
+                         },
+                         { .ival = "loop",
+                           .oval = FIO_BLKIO_WAIT_MODE_LOOP,
+                           .help = "Busy loop with non-blocking blkioq_do_io()",
+                         },
+               },
+               .category = FIO_OPT_C_ENGINE,
+               .group  = FIO_OPT_G_LIBBLKIO,
+       },
+       {
+               .name   = "libblkio_force_enable_completion_eventfd",
+               .lname  = "Force enable the completion eventfd, even if unused",
+               .type   = FIO_OPT_STR_SET,
+               .off1   = offsetof(struct fio_blkio_options,
+                                  force_enable_completion_eventfd),
+               .help   = "This can impact performance",
+               .category = FIO_OPT_C_ENGINE,
+               .group  = FIO_OPT_G_LIBBLKIO,
+       },
        {
                .name = NULL,
        },
@@ -151,6 +271,106 @@ static int fio_blkio_set_props_from_str(struct blkio *b, const char *opt_name,
                        blkio_get_error_msg()); \
        })
 
+static bool possibly_null_strs_equal(const char *a, const char *b)
+{
+       return (!a && !b) || (a && b && strcmp(a, b) == 0);
+}
+
+/*
+ * Returns the number of subjobs in the entire workload that use the
+ * 'libblkio' ioengine, set the 'thread' option, and have the given value for
+ * the 'hipri' option.
+ */
+static int total_threaded_subjobs(bool hipri)
+{
+       struct thread_data *td;
+       unsigned int i;
+       int count = 0;
+
+       for_each_td(td, i) {
+               const struct fio_blkio_options *options = td->eo;
+               if (strcmp(td->o.ioengine, "libblkio") == 0 &&
+                   td->o.use_thread && (bool)options->hipri == hipri)
+                       ++count;
+       }
+
+       return count;
+}
+
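+/*
+ * Options of the first threaded subjob to be set up; later threaded subjobs,
+ * which will share the process, are checked against these for compatibility.
+ */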
+static struct {
+       bool set_up;
+       bool direct;
+       struct fio_blkio_options opts;
+} first_threaded_subjob = { 0 };
+
+static void fio_blkio_log_opt_compat_err(const char *option_name)
+{
+       log_err("fio: jobs using engine libblkio and sharing a process must agree on the %s option\n",
+               option_name);
+}
+
+/*
+ * If td represents a subjob with option 'thread', check if its options are
+ * compatible with those of other threaded subjobs that were already set up.
+ */
+static int fio_blkio_check_opt_compat(struct thread_data *td)
+{
+       const struct fio_blkio_options *options = td->eo, *prev_options;
+
+       if (!td->o.use_thread)
+               return 0; /* subjob doesn't use 'thread' */
+
+       if (!first_threaded_subjob.set_up) {
+               /* first subjob using 'thread', store options for later */
+               first_threaded_subjob.set_up    = true;
+               first_threaded_subjob.direct    = td->o.odirect;
+               first_threaded_subjob.opts      = *options;
+               return 0;
+       }
+
+       /* not first subjob using 'thread', check option compatibility */
+       prev_options = &first_threaded_subjob.opts;
+
+       if (td->o.odirect != first_threaded_subjob.direct) {
+               fio_blkio_log_opt_compat_err("direct/buffered");
+               return 1;
+       }
+
+       if (strcmp(options->driver, prev_options->driver) != 0) {
+               fio_blkio_log_opt_compat_err("libblkio_driver");
+               return 1;
+       }
+
+       if (!possibly_null_strs_equal(options->path, prev_options->path)) {
+               fio_blkio_log_opt_compat_err("libblkio_path");
+               return 1;
+       }
+
+       if (!possibly_null_strs_equal(options->pre_connect_props,
+                                     prev_options->pre_connect_props)) {
+               fio_blkio_log_opt_compat_err("libblkio_pre_connect_props");
+               return 1;
+       }
+
+       if (options->num_entries != prev_options->num_entries) {
+               fio_blkio_log_opt_compat_err("libblkio_num_entries");
+               return 1;
+       }
+
+       if (options->queue_size != prev_options->queue_size) {
+               fio_blkio_log_opt_compat_err("libblkio_queue_size");
+               return 1;
+       }
+
+       if (!possibly_null_strs_equal(options->pre_start_props,
+                                     prev_options->pre_start_props)) {
+               fio_blkio_log_opt_compat_err("libblkio_pre_start_props");
+               return 1;
+       }
+
+       return 0;
+}
+
 static int fio_blkio_create_and_connect(struct thread_data *td,
                                        struct blkio **out_blkio)
 {
@@ -180,6 +400,13 @@ static int fio_blkio_create_and_connect(struct thread_data *td,
                goto err_blkio_destroy;
        }
 
+       if (options->path) {
+               if (blkio_set_str(b, "path", options->path) != 0) {
+                       fio_blkio_log_err(blkio_set_str);
+                       goto err_blkio_destroy;
+               }
+       }
+
        if (fio_blkio_set_props_from_str(b, "libblkio_pre_connect_props",
                                         options->pre_connect_props) != 0)
                goto err_blkio_destroy;
@@ -189,6 +416,21 @@ static int fio_blkio_create_and_connect(struct thread_data *td,
                goto err_blkio_destroy;
        }
 
+       if (options->num_entries != 0) {
+               if (blkio_set_int(b, "num-entries",
+                                 options->num_entries) != 0) {
+                       fio_blkio_log_err(blkio_set_int);
+                       goto err_blkio_destroy;
+               }
+       }
+
+       if (options->queue_size != 0) {
+               if (blkio_set_int(b, "queue-size", options->queue_size) != 0) {
+                       fio_blkio_log_err(blkio_set_int);
+                       goto err_blkio_destroy;
+               }
+       }
+
        if (fio_blkio_set_props_from_str(b, "libblkio_pre_start_props",
                                         options->pre_start_props) != 0)
                goto err_blkio_destroy;
@@ -201,6 +443,8 @@ err_blkio_destroy:
        return 1;
 }
 
+static bool incompatible_threaded_subjob_options = false;
+
 /*
  * This callback determines the device/file size, so it creates and connects a
  * blkio instance. But it is invoked from the main thread in the original fio
@@ -209,12 +453,29 @@ err_blkio_destroy:
  */
 static int fio_blkio_setup(struct thread_data *td)
 {
+       const struct fio_blkio_options *options = td->eo;
        struct blkio *b;
        int ret = 0;
        uint64_t capacity;
 
        assert(td->files_index == 1);
 
+       if (fio_blkio_check_opt_compat(td) != 0) {
+               incompatible_threaded_subjob_options = true;
+               return 1;
+       }
+
+       if (options->hipri &&
+               options->wait_mode == FIO_BLKIO_WAIT_MODE_EVENTFD) {
+               log_err("fio: option hipri is incompatible with option libblkio_wait_mode=eventfd\n");
+               return 1;
+       }
+
+       if (options->hipri && options->force_enable_completion_eventfd) {
+               log_err("fio: option hipri is incompatible with option libblkio_force_enable_completion_eventfd\n");
+               return 1;
+       }
+
        if (fio_blkio_create_and_connect(td, &b) != 0)
                return 1;
 
@@ -236,6 +497,16 @@ static int fio_blkio_init(struct thread_data *td)
 {
        const struct fio_blkio_options *options = td->eo;
        struct fio_blkio_data *data;
+       int flags;
+
+       if (td->o.use_thread && incompatible_threaded_subjob_options) {
+               /*
+                * Different subjobs using option 'thread' specified
+                * incompatible options. We don't know which configuration
+                * should win, so we just fail all such subjobs.
+                */
+               return 1;
+       }
 
        /*
         * Request enqueueing is fast, and it's not possible to know exactly
@@ -249,45 +520,101 @@ static int fio_blkio_init(struct thread_data *td)
                return 1;
        }
 
+       data->iovecs = calloc(td->o.iodepth, sizeof(data->iovecs[0]));
        data->completions = calloc(td->o.iodepth, sizeof(data->completions[0]));
-       if (!data->completions) {
+       if (!data->iovecs || !data->completions) {
                log_err("fio: calloc() failed\n");
                goto err_free;
        }
 
-       if (fio_blkio_create_and_connect(td, &data->b) != 0)
-               goto err_free;
+       fio_blkio_proc_lock();
 
-       if (blkio_set_int(data->b, "num-queues", options->hipri ? 0 : 1) != 0) {
-               fio_blkio_log_err(blkio_set_int);
-               goto err_blkio_destroy;
+       if (proc_state.initted_threads == 0) {
+               /* initialize per-process blkio */
+               int num_queues, num_poll_queues;
+
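+               /*
+                * With option 'thread', all threaded subjobs share this blkio
+                * instance, so create one queue (or poll queue) per such
+                * subjob; otherwise this process runs a single subjob.
+                */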
+               if (td->o.use_thread) {
+                       num_queues      = total_threaded_subjobs(false);
+                       num_poll_queues = total_threaded_subjobs(true);
+               } else {
+                       num_queues      = options->hipri ? 0 : 1;
+                       num_poll_queues = options->hipri ? 1 : 0;
+               }
+
+               if (fio_blkio_create_and_connect(td, &proc_state.b) != 0)
+                       goto err_unlock;
+
+               if (blkio_set_int(proc_state.b, "num-queues",
+                                 num_queues) != 0) {
+                       fio_blkio_log_err(blkio_set_int);
+                       goto err_blkio_destroy;
+               }
+
+               if (blkio_set_int(proc_state.b, "num-poll-queues",
+                                 num_poll_queues) != 0) {
+                       fio_blkio_log_err(blkio_set_int);
+                       goto err_blkio_destroy;
+               }
+
+               if (blkio_start(proc_state.b) != 0) {
+                       fio_blkio_log_err(blkio_start);
+                       goto err_blkio_destroy;
+               }
        }
 
-       if (blkio_set_int(data->b, "num-poll-queues",
-                         options->hipri ? 1 : 0) != 0) {
-               fio_blkio_log_err(blkio_set_int);
-               goto err_blkio_destroy;
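+       /*
+        * Assign this thread the next unused queue: poll queues and regular
+        * queues are indexed separately, in thread initialization order.
+        */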
+       if (options->hipri) {
+               int i = proc_state.initted_hipri_threads;
+               data->q = blkio_get_poll_queue(proc_state.b, i);
+       } else {
+               int i = proc_state.initted_threads -
+                               proc_state.initted_hipri_threads;
+               data->q = blkio_get_queue(proc_state.b, i);
        }
 
-       if (blkio_start(data->b) != 0) {
-               fio_blkio_log_err(blkio_start);
-               goto err_blkio_destroy;
+       if (options->wait_mode == FIO_BLKIO_WAIT_MODE_EVENTFD ||
+               options->force_enable_completion_eventfd) {
+               /* enable completion fd and make it blocking */
+               blkioq_set_completion_fd_enabled(data->q, true);
+               data->completion_fd = blkioq_get_completion_fd(data->q);
+
+               flags = fcntl(data->completion_fd, F_GETFL);
+               if (flags < 0) {
+                       log_err("fio: fcntl(F_GETFL) failed: %s\n",
+                               strerror(errno));
+                       goto err_blkio_destroy;
+               }
+
+               if (fcntl(data->completion_fd, F_SETFL,
+                         flags & ~O_NONBLOCK) != 0) {
+                       log_err("fio: fcntl(F_SETFL) failed: %s\n",
+                               strerror(errno));
+                       goto err_blkio_destroy;
+               }
+       } else {
+               data->completion_fd = -1;
        }
 
+       ++proc_state.initted_threads;
        if (options->hipri)
-               data->q = blkio_get_poll_queue(data->b, 0);
-       else
-               data->q = blkio_get_queue(data->b, 0);
+               ++proc_state.initted_hipri_threads;
 
        /* Set data last so cleanup() does nothing if init() fails. */
        td->io_ops_data = data;
 
+       fio_blkio_proc_unlock();
+
        return 0;
 
 err_blkio_destroy:
-       blkio_destroy(&data->b);
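+       /* only tear down the per-process blkio if this thread just created it */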
+       if (proc_state.initted_threads == 0)
+               blkio_destroy(&proc_state.b);
+err_unlock:
+       if (proc_state.initted_threads == 0)
+               proc_state.b = NULL;
+       fio_blkio_proc_unlock();
 err_free:
        free(data->completions);
+       free(data->iovecs);
        free(data);
        return 1;
 }
@@ -324,7 +651,7 @@ static int fio_blkio_post_init(struct thread_data *td)
                        .fd     = -1,
                };
 
-               if (blkio_map_mem_region(data->b, &region) != 0) {
+               if (blkio_map_mem_region(proc_state.b, &region) != 0) {
                        fio_blkio_log_err(blkio_map_mem_region);
                        return 1;
                }
@@ -337,10 +664,25 @@ static void fio_blkio_cleanup(struct thread_data *td)
 {
        struct fio_blkio_data *data = td->io_ops_data;
 
+       /*
+        * Subjobs from different jobs can be terminated at different times, so
+        * this callback may be invoked for one subjob while another is still
+        * doing I/O. Those subjobs may share the process, so we must wait until
+        * the last subjob in the process wants to clean up to actually destroy
+        * the blkio.
+        */
+
        if (data) {
-               blkio_destroy(&data->b);
                free(data->completions);
+               free(data->iovecs);
                free(data);
+
+               fio_blkio_proc_lock();
+               if (--proc_state.initted_threads == 0) {
+                       blkio_destroy(&proc_state.b);
+                       proc_state.b = NULL;
+               }
+               fio_blkio_proc_unlock();
        }
 }
 
@@ -352,7 +694,7 @@ static int fio_blkio_iomem_alloc(struct thread_data *td, size_t size)
        int ret;
        uint64_t mem_region_alignment;
 
-       if (blkio_get_uint64(data->b, "mem-region-alignment",
+       if (blkio_get_uint64(proc_state.b, "mem-region-alignment",
                             &mem_region_alignment) != 0) {
                fio_blkio_log_err(blkio_get_uint64);
                return 1;
@@ -361,13 +703,16 @@ static int fio_blkio_iomem_alloc(struct thread_data *td, size_t size)
        /* round up size to satisfy mem-region-alignment */
        size = align_up(size, (size_t)mem_region_alignment);
 
-       if (blkio_alloc_mem_region(data->b, &data->mem_region, size) != 0) {
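+       /* serialize mem region operations on the shared per-process blkio */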
+       fio_blkio_proc_lock();
+
+       if (blkio_alloc_mem_region(proc_state.b, &data->mem_region,
+                                  size) != 0) {
                fio_blkio_log_err(blkio_alloc_mem_region);
                ret = 1;
                goto out;
        }
 
-       if (blkio_map_mem_region(data->b, &data->mem_region) != 0) {
+       if (blkio_map_mem_region(proc_state.b, &data->mem_region) != 0) {
                fio_blkio_log_err(blkio_map_mem_region);
                ret = 1;
                goto out_free;
@@ -380,8 +725,9 @@ static int fio_blkio_iomem_alloc(struct thread_data *td, size_t size)
        goto out;
 
 out_free:
-       blkio_free_mem_region(data->b, &data->mem_region);
+       blkio_free_mem_region(proc_state.b, &data->mem_region);
 out:
+       fio_blkio_proc_unlock();
        return ret;
 }
 
@@ -390,8 +736,10 @@ static void fio_blkio_iomem_free(struct thread_data *td)
        struct fio_blkio_data *data = td->io_ops_data;
 
        if (data && data->has_mem_region) {
-               blkio_unmap_mem_region(data->b, &data->mem_region);
-               blkio_free_mem_region(data->b, &data->mem_region);
+               fio_blkio_proc_lock();
+               blkio_unmap_mem_region(proc_state.b, &data->mem_region);
+               blkio_free_mem_region(proc_state.b, &data->mem_region);
+               fio_blkio_proc_unlock();
 
                data->has_mem_region = false;
        }
@@ -405,22 +753,49 @@ static int fio_blkio_open_file(struct thread_data *td, struct fio_file *f)
 static enum fio_q_status fio_blkio_queue(struct thread_data *td,
                                         struct io_u *io_u)
 {
+       const struct fio_blkio_options *options = td->eo;
        struct fio_blkio_data *data = td->io_ops_data;
 
        fio_ro_check(td, io_u);
 
        switch (io_u->ddir) {
                case DDIR_READ:
-                       blkioq_read(data->q, io_u->offset, io_u->xfer_buf,
-                                   (size_t)io_u->xfer_buflen, io_u, 0);
+                       if (options->vectored) {
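+                               /* single-element iovec; one slot per io_u index */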
+                               struct iovec *iov = &data->iovecs[io_u->index];
+                               iov->iov_base = io_u->xfer_buf;
+                               iov->iov_len = (size_t)io_u->xfer_buflen;
+
+                               blkioq_readv(data->q, io_u->offset, iov, 1,
+                                            io_u, 0);
+                       } else {
+                               blkioq_read(data->q, io_u->offset,
+                                           io_u->xfer_buf,
+                                           (size_t)io_u->xfer_buflen, io_u, 0);
+                       }
                        break;
                case DDIR_WRITE:
-                       blkioq_write(data->q, io_u->offset, io_u->xfer_buf,
-                                    (size_t)io_u->xfer_buflen, io_u, 0);
+                       if (options->vectored) {
+                               struct iovec *iov = &data->iovecs[io_u->index];
+                               iov->iov_base = io_u->xfer_buf;
+                               iov->iov_len = (size_t)io_u->xfer_buflen;
+
+                               blkioq_writev(data->q, io_u->offset, iov, 1,
+                                             io_u, 0);
+                       } else {
+                               blkioq_write(data->q, io_u->offset,
+                                            io_u->xfer_buf,
+                                            (size_t)io_u->xfer_buflen, io_u,
+                                            0);
+                       }
                        break;
                case DDIR_TRIM:
-                       blkioq_discard(data->q, io_u->offset, io_u->xfer_buflen,
-                                      io_u, 0);
+                       if (options->write_zeroes_on_trim) {
+                               blkioq_write_zeroes(data->q, io_u->offset,
+                                                   io_u->xfer_buflen, io_u, 0);
+                       } else {
+                               blkioq_discard(data->q, io_u->offset,
+                                              io_u->xfer_buflen, io_u, 0);
+                       }
                        break;
                case DDIR_SYNC:
                case DDIR_DATASYNC:
@@ -438,16 +813,59 @@ static enum fio_q_status fio_blkio_queue(struct thread_data *td,
 static int fio_blkio_getevents(struct thread_data *td, unsigned int min,
                               unsigned int max, const struct timespec *t)
 {
+       const struct fio_blkio_options *options = td->eo;
        struct fio_blkio_data *data = td->io_ops_data;
-       int n;
-
-       n = blkioq_do_io(data->q, data->completions, (int)min, (int)max, NULL);
-       if (n < 0) {
-               fio_blkio_log_err(blkioq_do_io);
+       int ret, n;
+       uint64_t event;
+
+       switch (options->wait_mode) {
+       case FIO_BLKIO_WAIT_MODE_BLOCK:
+               n = blkioq_do_io(data->q, data->completions, (int)min, (int)max,
+                                NULL);
+               if (n < 0) {
+                       fio_blkio_log_err(blkioq_do_io);
+                       return -1;
+               }
+               return n;
+       case FIO_BLKIO_WAIT_MODE_EVENTFD:
+               n = blkioq_do_io(data->q, data->completions, 0, (int)max, NULL);
+               if (n < 0) {
+                       fio_blkio_log_err(blkioq_do_io);
+                       return -1;
+               }
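+               /*
+                * Wait on the (blocking) completion eventfd for more
+                * completions to arrive, then reap them with further
+                * non-blocking blkioq_do_io() calls until at least 'min'
+                * have been collected.
+                */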
+               while (n < (int)min) {
+                       ret = read(data->completion_fd, &event, sizeof(event));
+                       if (ret != sizeof(event)) {
+                               log_err("fio: read() on the completion fd returned %d\n",
+                                       ret);
+                               return -1;
+                       }
+
+                       ret = blkioq_do_io(data->q, data->completions + n, 0,
+                                          (int)max - n, NULL);
+                       if (ret < 0) {
+                               fio_blkio_log_err(blkioq_do_io);
+                               return -1;
+                       }
+
+                       n += ret;
+               }
+               return n;
+       case FIO_BLKIO_WAIT_MODE_LOOP:
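+               /* busy-poll with non-blocking blkioq_do_io() until 'min' completions */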
+               for (n = 0; n < (int)min; ) {
+                       ret = blkioq_do_io(data->q, data->completions + n, 0,
+                                          (int)max - n, NULL);
+                       if (ret < 0) {
+                               fio_blkio_log_err(blkioq_do_io);
+                               return -1;
+                       }
+
+                       n += ret;
+               }
+               return n;
+       default:
                return -1;
        }
-
-       return n;
 }
 
 static struct io_u *fio_blkio_event(struct thread_data *td, int event)