Fixup bw/iops logging for short runs
diff --git a/backend.c b/backend.c
index 3430e6612d4312740f31045d6ebf6ff4ff96950b..0a42da3ec440673d741da6ce56e235e997caf1b6 100644
--- a/backend.c
+++ b/backend.c
@@ -54,6 +54,8 @@
 #include "idletime.h"
 #include "err.h"
 #include "lib/tp.h"
+#include "workqueue.h"
+#include "lib/mountcheck.h"
 
 static pthread_t helper_thread;
 static pthread_mutex_t helper_lock;
@@ -98,7 +100,7 @@ static void sig_int(int sig)
        }
 }
 
-static void sig_show_status(int sig)
+void sig_show_status(int sig)
 {
        show_running_run_stats();
 }
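Dropping the static qualifier exports the handler so other translation units can install it. A minimal sketch of such a registration, assuming a sigaction-based setup (install_status_handler and the SIGUSR1 choice are illustrative; where fio actually wires this up is outside this diff):

    #include <signal.h>
    #include <string.h>

    extern void sig_show_status(int sig);

    /* hypothetical registration site in another file */
    static void install_status_handler(void)
    {
            struct sigaction act;

            memset(&act, 0, sizeof(act));
            act.sa_handler = sig_show_status;
            act.sa_flags = SA_RESTART;
            sigaction(SIGUSR1, &act, NULL);
    }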
@@ -177,7 +179,7 @@ static int __check_min_rate(struct thread_data *td, struct timeval *now,
                if (spent < td->o.ratecycle)
                        return 0;
 
-               if (td->o.rate[ddir]) {
+               if (td->o.rate[ddir] || td->o.ratemin[ddir]) {
                        /*
                         * check bandwidth specified rate
                         */
@@ -218,6 +220,7 @@ static int __check_min_rate(struct thread_data *td, struct timeval *now,
                                        log_err("%s: min iops rate %u not met,"
                                                " got %lu\n", td->o.name,
                                                        rate_iops_min, rate);
+                                       return 1;
                                }
                        }
                }
@@ -229,16 +232,15 @@ static int __check_min_rate(struct thread_data *td, struct timeval *now,
        return 0;
 }
 
-static int check_min_rate(struct thread_data *td, struct timeval *now,
-                         uint64_t *bytes_done)
+static int check_min_rate(struct thread_data *td, struct timeval *now)
 {
        int ret = 0;
 
-       if (bytes_done[DDIR_READ])
+       if (td->bytes_done[DDIR_READ])
                ret |= __check_min_rate(td, now, DDIR_READ);
-       if (bytes_done[DDIR_WRITE])
+       if (td->bytes_done[DDIR_WRITE])
                ret |= __check_min_rate(td, now, DDIR_WRITE);
-       if (bytes_done[DDIR_TRIM])
+       if (td->bytes_done[DDIR_TRIM])
                ret |= __check_min_rate(td, now, DDIR_TRIM);
 
        return ret;
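For context, the minimum-rate logic these hunks touch is driven by fio's existing rate options; a hypothetical job exercising both the bandwidth and IOPS floors that __check_min_rate() enforces:

    ; abort if the job cannot sustain 500 KB/s or 100 IOPS,
    ; averaged over 1000 msec windows
    [rated-read]
    rw=read
    size=1g
    ratemin=500k
    rate_iops_min=100
    ratecycle=1000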
@@ -255,7 +257,7 @@ static void cleanup_pending_aio(struct thread_data *td)
        /*
         * get immediately available events, if any
         */
-       r = io_u_queued_complete(td, 0, NULL);
+       r = io_u_queued_complete(td, 0);
        if (r < 0)
                return;
 
@@ -276,7 +278,7 @@ static void cleanup_pending_aio(struct thread_data *td)
        }
 
        if (td->cur_depth)
-               r = io_u_queued_complete(td, td->cur_depth, NULL);
+               r = io_u_queued_complete(td, td->cur_depth);
 }
 
 /*
@@ -306,7 +308,7 @@ requeue:
                put_io_u(td, io_u);
                return 1;
        } else if (ret == FIO_Q_QUEUED) {
-               if (io_u_queued_complete(td, 1, NULL) < 0)
+               if (io_u_queued_complete(td, 1) < 0)
                        return 1;
        } else if (ret == FIO_Q_COMPLETED) {
                if (io_u->error) {
@@ -314,7 +316,7 @@ requeue:
                        return 1;
                }
 
-               if (io_u_sync_complete(td, io_u, NULL) < 0)
+               if (io_u_sync_complete(td, io_u) < 0)
                        return 1;
        } else if (ret == FIO_Q_BUSY) {
                if (td_io_commit(td))
@@ -363,6 +365,23 @@ static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
        return 0;
 }
 
+/*
+ * We need to update the runtime consistently in ms, but keep a running
+ * tally of the current elapsed time in microseconds for sub-millisecond
+ * updates.
+ */
+static inline void update_runtime(struct thread_data *td,
+                                 unsigned long long *elapsed_us,
+                                 const enum fio_ddir ddir)
+{
+       if (ddir == DDIR_WRITE && td_write(td) && td->o.verify_only)
+               return;
+
+       td->ts.runtime[ddir] -= (elapsed_us[ddir] + 999) / 1000;
+       elapsed_us[ddir] += utime_since_now(&td->start);
+       td->ts.runtime[ddir] += (elapsed_us[ddir] + 999) / 1000;
+}
+
 static int break_on_this_error(struct thread_data *td, enum fio_ddir ddir,
                               int *retptr)
 {
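The subtract/re-add pattern keeps ts.runtime[ddir] equal to the rounded-up total of the microsecond tally, rather than a sum of per-pass millisecond truncations. A worked example, assuming three loop passes of 400 usec each:

    pass 1: elapsed_us = 400;  runtime  = (400 + 999) / 1000         = 1 ms
    pass 2: runtime -= 1; elapsed_us = 800;  runtime += (800 + 999) / 1000   -> 1 ms
    pass 3: runtime -= 1; elapsed_us = 1200; runtime += (1200 + 999) / 1000  -> 2 ms

The code this replaces (see the thread_main hunks below) added mtime_since_now() per pass, which truncates each sub-millisecond pass to 0 ms; that is exactly the kind of short-run accounting error this series addresses.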
@@ -418,8 +437,7 @@ static void check_update_rusage(struct thread_data *td)
        }
 }
 
-static int wait_for_completions(struct thread_data *td, struct timeval *time,
-                               uint64_t *bytes_done)
+static int wait_for_completions(struct thread_data *td, struct timeval *time)
 {
        const int full = queue_full(td);
        int min_evts = 0;
@@ -428,8 +446,8 @@ static int wait_for_completions(struct thread_data *td, struct timeval *time,
        /*
         * if the queue is full, we MUST reap at least 1 event
         */
-       min_evts = min(td->o.iodepth_batch_complete, td->cur_depth);
-       if (full && !min_evts)
+       min_evts = min(td->o.iodepth_batch_complete_min, td->cur_depth);
+       if ((full && !min_evts) || !td->o.iodepth_batch_complete_min)
                min_evts = 1;
 
        if (time && (__should_check_rate(td, DDIR_READ) ||
@@ -438,7 +456,7 @@ static int wait_for_completions(struct thread_data *td, struct timeval *time,
                fio_gettime(time, NULL);
 
        do {
-               ret = io_u_queued_complete(td, min_evts, bytes_done);
+               ret = io_u_queued_complete(td, min_evts);
                if (ret < 0)
                        break;
        } while (full && (td->cur_depth > td->o.iodepth_low));
@@ -446,13 +464,105 @@ static int wait_for_completions(struct thread_data *td, struct timeval *time,
        return ret;
 }
 
+int io_queue_event(struct thread_data *td, struct io_u *io_u, int *ret,
+                  enum fio_ddir ddir, uint64_t *bytes_issued, int from_verify,
+                  struct timeval *comp_time)
+{
+       int ret2;
+
+       switch (*ret) {
+       case FIO_Q_COMPLETED:
+               if (io_u->error) {
+                       *ret = -io_u->error;
+                       clear_io_u(td, io_u);
+               } else if (io_u->resid) {
+                       int bytes = io_u->xfer_buflen - io_u->resid;
+                       struct fio_file *f = io_u->file;
+
+                       if (bytes_issued)
+                               *bytes_issued += bytes;
+
+                       if (!from_verify)
+                               trim_io_piece(td, io_u);
+
+                       /*
+                        * zero read, fail
+                        */
+                       if (!bytes) {
+                               if (!from_verify)
+                                       unlog_io_piece(td, io_u);
+                               td_verror(td, EIO, "full resid");
+                               put_io_u(td, io_u);
+                               break;
+                       }
+
+                       io_u->xfer_buflen = io_u->resid;
+                       io_u->xfer_buf += bytes;
+                       io_u->offset += bytes;
+
+                       if (ddir_rw(io_u->ddir))
+                               td->ts.short_io_u[io_u->ddir]++;
+
+                       f = io_u->file;
+                       if (io_u->offset == f->real_file_size)
+                               goto sync_done;
+
+                       requeue_io_u(td, &io_u);
+               } else {
+sync_done:
+                       if (comp_time && (__should_check_rate(td, DDIR_READ) ||
+                           __should_check_rate(td, DDIR_WRITE) ||
+                           __should_check_rate(td, DDIR_TRIM)))
+                               fio_gettime(comp_time, NULL);
+
+                       *ret = io_u_sync_complete(td, io_u);
+                       if (*ret < 0)
+                               break;
+               }
+               return 0;
+       case FIO_Q_QUEUED:
+               /*
+                * if the engine doesn't have a commit hook,
+                * the io_u is really queued. if it does have such
+                * a hook, it has to call io_u_queued() itself.
+                */
+               if (td->io_ops->commit == NULL)
+                       io_u_queued(td, io_u);
+               if (bytes_issued)
+                       *bytes_issued += io_u->xfer_buflen;
+               break;
+       case FIO_Q_BUSY:
+               if (!from_verify)
+                       unlog_io_piece(td, io_u);
+               requeue_io_u(td, &io_u);
+               ret2 = td_io_commit(td);
+               if (ret2 < 0)
+                       *ret = ret2;
+               break;
+       default:
+               assert(*ret < 0);
+               td_verror(td, -(*ret), "td_io_queue");
+               break;
+       }
+
+       if (break_on_this_error(td, ddir, ret))
+               return 1;
+
+       return 0;
+}
+
+static inline int io_in_polling(struct thread_data *td)
+{
+       return !td->o.iodepth_batch_complete_min &&
+                  !td->o.iodepth_batch_complete_max;
+}
+
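io_in_polling() reads the two batch-complete bounds as a polling request: with both at zero, the I/O loops reap whatever has already completed instead of blocking for a minimum event count. In job-file terms, a hypothetical example:

    ; poll for completions rather than wait for a batch
    [polled]
    ioengine=libaio
    iodepth=16
    iodepth_batch_complete_min=0
    iodepth_batch_complete_max=0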
 /*
  * The main verify engine. Runs over the writes we previously submitted,
  * reads the blocks back in, and checks the crc/md5 of the data.
  */
 static void do_verify(struct thread_data *td, uint64_t verify_bytes)
 {
-       uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
        struct fio_file *f;
        struct io_u *io_u;
        int ret, min_events;
@@ -483,7 +593,7 @@ static void do_verify(struct thread_data *td, uint64_t verify_bytes)
        io_u = NULL;
        while (!td->terminate) {
                enum fio_ddir ddir;
-               int ret2, full;
+               int full;
 
                update_tv_cache(td);
                check_update_rusage(td);
@@ -514,7 +624,7 @@ static void do_verify(struct thread_data *td, uint64_t verify_bytes)
                                break;
                        }
                } else {
-                       if (ddir_rw_sum(bytes_done) + td->o.rw_min_bs > verify_bytes)
+                       if (ddir_rw_sum(td->bytes_done) + td->o.rw_min_bs > verify_bytes)
                                break;
 
                        while ((io_u = get_io_u(td)) != NULL) {
@@ -539,7 +649,7 @@ static void do_verify(struct thread_data *td, uint64_t verify_bytes)
                                        continue;
                                } else if (io_u->ddir == DDIR_TRIM) {
                                        io_u->ddir = DDIR_READ;
-                                       io_u->flags |= IO_U_F_TRIMMED;
+                                       io_u_set(io_u, IO_U_F_TRIMMED);
                                        break;
                                } else if (io_u->ddir == DDIR_WRITE) {
                                        io_u->ddir = DDIR_READ;
@@ -569,57 +679,8 @@ static void do_verify(struct thread_data *td, uint64_t verify_bytes)
                        fio_gettime(&io_u->start_time, NULL);
 
                ret = td_io_queue(td, io_u);
-               switch (ret) {
-               case FIO_Q_COMPLETED:
-                       if (io_u->error) {
-                               ret = -io_u->error;
-                               clear_io_u(td, io_u);
-                       } else if (io_u->resid) {
-                               int bytes = io_u->xfer_buflen - io_u->resid;
-
-                               /*
-                                * zero read, fail
-                                */
-                               if (!bytes) {
-                                       td_verror(td, EIO, "full resid");
-                                       put_io_u(td, io_u);
-                                       break;
-                               }
-
-                               io_u->xfer_buflen = io_u->resid;
-                               io_u->xfer_buf += bytes;
-                               io_u->offset += bytes;
-
-                               if (ddir_rw(io_u->ddir))
-                                       td->ts.short_io_u[io_u->ddir]++;
-
-                               f = io_u->file;
-                               if (io_u->offset == f->real_file_size)
-                                       goto sync_done;
-
-                               requeue_io_u(td, &io_u);
-                       } else {
-sync_done:
-                               ret = io_u_sync_complete(td, io_u, bytes_done);
-                               if (ret < 0)
-                                       break;
-                       }
-                       continue;
-               case FIO_Q_QUEUED:
-                       break;
-               case FIO_Q_BUSY:
-                       requeue_io_u(td, &io_u);
-                       ret2 = td_io_commit(td);
-                       if (ret2 < 0)
-                               ret = ret2;
-                       break;
-               default:
-                       assert(ret < 0);
-                       td_verror(td, -ret, "td_io_queue");
-                       break;
-               }
 
-               if (break_on_this_error(td, ddir, &ret))
+               if (io_queue_event(td, io_u, &ret, ddir, NULL, 1, NULL))
                        break;
 
                /*
@@ -629,8 +690,8 @@ sync_done:
                 */
 reap:
                full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
-               if (full || !td->o.iodepth_batch_complete)
-                       ret = wait_for_completions(td, NULL, bytes_done);
+               if (full || io_in_polling(td))
+                       ret = wait_for_completions(td, NULL);
 
                if (ret < 0)
                        break;
@@ -642,7 +703,7 @@ reap:
                min_events = td->cur_depth;
 
                if (min_events)
-                       ret = io_u_queued_complete(td, min_events, NULL);
+                       ret = io_u_queued_complete(td, min_events);
        } else
                cleanup_pending_aio(td);
 
@@ -658,10 +719,10 @@ static unsigned int exceeds_number_ios(struct thread_data *td)
        if (!td->o.number_ios)
                return 0;
 
-       number_ios = ddir_rw_sum(td->this_io_blocks);
+       number_ios = ddir_rw_sum(td->io_blocks);
        number_ios += td->io_u_queued + td->io_u_in_flight;
 
-       return number_ios >= td->o.number_ios;
+       return number_ios >= (td->o.number_ios * td->loops);
 }
 
 static int io_issue_bytes_exceeded(struct thread_data *td)
@@ -682,6 +743,7 @@ static int io_issue_bytes_exceeded(struct thread_data *td)
        else
                limit = td->o.size;
 
+       limit *= td->loops;
        return bytes >= limit || exceeds_number_ios(td);
 }
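Both exceeds_number_ios() and the issue/completion byte checks now scale their budgets by td->loops, so a looping job is no longer stopped by its single-pass limit. Ignoring the fill_device special cases, the effective bound is:

    limit = <explicit io limit if set, else size> * loops

e.g. size=1g with td->loops at 2 permits 2 GiB on both the issue and completion side, and number_ios is multiplied the same way.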
 
@@ -703,9 +765,29 @@ static int io_complete_bytes_exceeded(struct thread_data *td)
        else
                limit = td->o.size;
 
+       limit *= td->loops;
        return bytes >= limit || exceeds_number_ios(td);
 }
 
+/*
+ * Used to calculate the next I/O time for rate control.
+ */
+static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
+{
+       uint64_t secs, remainder, bps, bytes;
+
+       assert(!(td->flags & TD_F_CHILD));
+       bytes = td->rate_io_issue_bytes[ddir];
+       bps = td->rate_bps[ddir];
+       if (bps) {
+               secs = bytes / bps;
+               remainder = bytes % bps;
+               return remainder * 1000000 / bps + secs * 1000000;
+       } else
+               return 0;
+}
+
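Splitting bytes into whole seconds plus a remainder before scaling keeps every intermediate product small; the naive bytes * 1000000 / bps would overflow 64 bits once roughly 18 TB had been issued. A worked example with assumed numbers:

    bps   = 1048576          (1 MiB/s target)
    bytes = 5767168          (5.5 MiB issued so far)

    secs      = 5767168 / 1048576            = 5
    remainder = 5767168 % 1048576            = 524288
    usecs     = 524288 * 1000000 / 1048576
              + 5 * 1000000                  = 5500000

so the issue side is on schedule only once 5.5 s of runtime have elapsed.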
 /*
  * Main IO worker function. It retrieves io_u's to process and queues
  * and reaps them, checking for rate and errors along the way.
@@ -714,7 +796,6 @@ static int io_complete_bytes_exceeded(struct thread_data *td)
  */
 static uint64_t do_io(struct thread_data *td)
 {
-       uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
        unsigned int i;
        int ret = 0;
        uint64_t total_bytes, bytes_issued = 0;
@@ -742,12 +823,17 @@ static uint64_t do_io(struct thread_data *td)
           (td_write(td) && td->o.verify_backlog))
                total_bytes += td->o.size;
 
+       /*
+        * In trimwrite mode, each byte is trimmed and then written, so
+        * allow total_bytes to be twice as big.
+        */
+       if (td_trimwrite(td))
+               total_bytes += td->total_io_size;
+
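As a concrete example: a hypothetical trimwrite job with size=1g has a total_io_size of 1 GiB, so total_bytes becomes 2 GiB, covering 1 GiB of trims plus 1 GiB of writes over the same blocks.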
        while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
                (!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) ||
                td->o.time_based) {
                struct timeval comp_time;
                struct io_u *io_u;
-               int ret2, full;
+               int full;
                enum fio_ddir ddir;
 
                check_update_rusage(td);
@@ -827,97 +913,42 @@ static uint64_t do_io(struct thread_data *td)
                    !td->o.experimental_verify)
                        log_io_piece(td, io_u);
 
-               ret = td_io_queue(td, io_u);
-               switch (ret) {
-               case FIO_Q_COMPLETED:
-                       if (io_u->error) {
-                               ret = -io_u->error;
-                               unlog_io_piece(td, io_u);
-                               clear_io_u(td, io_u);
-                       } else if (io_u->resid) {
-                               int bytes = io_u->xfer_buflen - io_u->resid;
-                               struct fio_file *f = io_u->file;
-
-                               bytes_issued += bytes;
-
-                               trim_io_piece(td, io_u);
-
-                               /*
-                                * zero read, fail
-                                */
-                               if (!bytes) {
-                                       unlog_io_piece(td, io_u);
-                                       td_verror(td, EIO, "full resid");
-                                       put_io_u(td, io_u);
-                                       break;
-                               }
+               if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
+                       if (td->error)
+                               break;
+                       ret = workqueue_enqueue(&td->io_wq, io_u);
 
-                               io_u->xfer_buflen = io_u->resid;
-                               io_u->xfer_buf += bytes;
-                               io_u->offset += bytes;
+                       if (should_check_rate(td))
+                               td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
 
-                               if (ddir_rw(io_u->ddir))
-                                       td->ts.short_io_u[io_u->ddir]++;
+               } else {
+                       ret = td_io_queue(td, io_u);
 
-                               if (io_u->offset == f->real_file_size)
-                                       goto sync_done;
+                       if (should_check_rate(td))
+                               td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
 
-                               requeue_io_u(td, &io_u);
-                       } else {
-sync_done:
-                               if (__should_check_rate(td, DDIR_READ) ||
-                                   __should_check_rate(td, DDIR_WRITE) ||
-                                   __should_check_rate(td, DDIR_TRIM))
-                                       fio_gettime(&comp_time, NULL);
+                       if (io_queue_event(td, io_u, &ret, ddir, &bytes_issued, 0, &comp_time))
+                               break;
 
-                               ret = io_u_sync_complete(td, io_u, bytes_done);
-                               if (ret < 0)
-                                       break;
-                               bytes_issued += io_u->xfer_buflen;
-                       }
-                       break;
-               case FIO_Q_QUEUED:
                        /*
-                        * if the engine doesn't have a commit hook,
-                        * the io_u is really queued. if it does have such
-                        * a hook, it has to call io_u_queued() itself.
+                        * See if we need to complete some commands. Note that
+                        * we can get BUSY even without IO queued, if the
+                        * system is resource starved.
                         */
-                       if (td->io_ops->commit == NULL)
-                               io_u_queued(td, io_u);
-                       bytes_issued += io_u->xfer_buflen;
-                       break;
-               case FIO_Q_BUSY:
-                       unlog_io_piece(td, io_u);
-                       requeue_io_u(td, &io_u);
-                       ret2 = td_io_commit(td);
-                       if (ret2 < 0)
-                               ret = ret2;
-                       break;
-               default:
-                       assert(ret < 0);
-                       put_io_u(td, io_u);
-                       break;
-               }
-
-               if (break_on_this_error(td, ddir, &ret))
-                       break;
-
-               /*
-                * See if we need to complete some commands. Note that we
-                * can get BUSY even without IO queued, if the system is
-                * resource starved.
-                */
 reap:
-               full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
-               if (full || !td->o.iodepth_batch_complete)
-                       ret = wait_for_completions(td, &comp_time, bytes_done);
+                       full = queue_full(td) ||
+                               (ret == FIO_Q_BUSY && td->cur_depth);
+                       if (full || io_in_polling(td))
+                               ret = wait_for_completions(td, &comp_time);
+               }
                if (ret < 0)
                        break;
-               if (!ddir_rw_sum(bytes_done) && !(td->io_ops->flags & FIO_NOIO))
+               if (!ddir_rw_sum(td->bytes_done) &&
+                   !(td->io_ops->flags & FIO_NOIO))
                        continue;
 
-               if (!in_ramp_time(td) && should_check_rate(td, bytes_done)) {
-                       if (check_min_rate(td, &comp_time, bytes_done)) {
+               if (!in_ramp_time(td) && should_check_rate(td)) {
+                       if (check_min_rate(td, &comp_time)) {
                                if (exitall_on_terminate)
                                        fio_terminate_threads(td->groupid);
                                td_verror(td, EIO, "check_min_rate");
@@ -958,9 +989,14 @@ reap:
        if (!td->error) {
                struct fio_file *f;
 
-               i = td->cur_depth;
+               if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
+                       workqueue_flush(&td->io_wq);
+                       i = 0;
+               } else
+                       i = td->cur_depth;
+
                if (i) {
-                       ret = io_u_queued_complete(td, i, bytes_done);
+                       ret = io_u_queued_complete(td, i);
                        if (td->o.fill_device && td->error == ENOSPC)
                                td->error = 0;
                }
@@ -985,7 +1021,7 @@ reap:
        if (!ddir_rw_sum(td->this_io_bytes))
                td->done = 1;
 
-       return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
+       return td->bytes_done[DDIR_WRITE] + td->bytes_done[DDIR_TRIM];
 }
 
 static void cleanup_io_u(struct thread_data *td)
@@ -1172,13 +1208,17 @@ static int switch_ioscheduler(struct thread_data *td)
        /*
         * Read back and check that the selected scheduler is now the default.
         */
+       memset(tmp, 0, sizeof(tmp));
        ret = fread(tmp, sizeof(tmp), 1, f);
        if (ferror(f) || ret < 0) {
                td_verror(td, errno, "fread");
                fclose(f);
                return 1;
        }
-       tmp[sizeof(tmp) - 1] = '\0';
+       /*
+        * either a list of io schedulers or "none\n" is expected.
+        */
+       tmp[strlen(tmp) - 1] = '\0';
 
 
        sprintf(tmp2, "[%s]", td->o.ioscheduler);
@@ -1235,7 +1275,8 @@ static int keep_running(struct thread_data *td)
 
 static int exec_string(struct thread_options *o, const char *string, const char *mode)
 {
-       int ret, newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
+       size_t newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
+       int ret;
        char *str;
 
        str = malloc(newlen);
@@ -1255,8 +1296,6 @@ static int exec_string(struct thread_options *o, const char *string, const char
  */
 static uint64_t do_dry_run(struct thread_data *td)
 {
-       uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
-
        td_set_runstate(td, TD_RUNNING);
 
        while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
@@ -1271,7 +1310,7 @@ static uint64_t do_dry_run(struct thread_data *td)
                if (!io_u)
                        break;
 
-               io_u->flags |= IO_U_F_FLIGHT;
+               io_u_set(io_u, IO_U_F_FLIGHT);
                io_u->error = 0;
                io_u->resid = 0;
                if (ddir_rw(acct_ddir(io_u)))
@@ -1287,11 +1326,34 @@ static uint64_t do_dry_run(struct thread_data *td)
                    !td->o.experimental_verify)
                        log_io_piece(td, io_u);
 
-               ret = io_u_sync_complete(td, io_u, bytes_done);
+               ret = io_u_sync_complete(td, io_u);
                (void) ret;
        }
 
-       return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
+       return td->bytes_done[DDIR_WRITE] + td->bytes_done[DDIR_TRIM];
+}
+
+static void io_workqueue_fn(struct thread_data *td, struct io_u *io_u)
+{
+       const enum fio_ddir ddir = io_u->ddir;
+       int ret;
+
+       dprint(FD_RATE, "io_u %p queued by %u\n", io_u, gettid());
+
+       io_u_set(io_u, IO_U_F_NO_FILE_PUT);
+
+       td->cur_depth++;
+
+       ret = td_io_queue(td, io_u);
+
+       dprint(FD_RATE, "io_u %p ret %d by %u\n", io_u, ret, gettid());
+
+       io_queue_event(td, io_u, &ret, ddir, NULL, 0, NULL);
+
+       if (ret == FIO_Q_QUEUED)
+               ret = io_u_queued_complete(td, 1);
+
+       td->cur_depth--;
 }
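io_workqueue_fn() is the per-io_u callback run in a workqueue worker when submission is offloaded; the issuing thread only enqueues (see the IO_MODE_OFFLOAD branch added to do_io() above). Switching a job to this mode is a single option, as in this hypothetical job:

    ; submit from workqueue workers instead of the job thread
    [offloaded]
    ioengine=libaio
    iodepth=32
    io_submit_mode=offload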
 
 /*
@@ -1300,7 +1362,7 @@ static uint64_t do_dry_run(struct thread_data *td)
  */
 static void *thread_main(void *data)
 {
-       unsigned long long elapsed;
+       unsigned long long elapsed_us[DDIR_RWDIR_CNT] = { 0, };
        struct thread_data *td = data;
        struct thread_options *o = &td->o;
        pthread_condattr_t attr;
@@ -1491,29 +1553,34 @@ static void *thread_main(void *data)
 
        fio_verify_init(td);
 
+       if ((o->io_submit_mode == IO_MODE_OFFLOAD) &&
+           workqueue_init(td, &td->io_wq, io_workqueue_fn, td->o.iodepth))
+               goto err;
+
        fio_gettime(&td->epoch, NULL);
        fio_getrusage(&td->ru_start);
+       memcpy(&td->bw_sample_time, &td->epoch, sizeof(td->epoch));
+       memcpy(&td->iops_sample_time, &td->epoch, sizeof(td->epoch));
+
+       if (o->ratemin[DDIR_READ] || o->ratemin[DDIR_WRITE] ||
+                       o->ratemin[DDIR_TRIM]) {
+               memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time,
+                                       sizeof(td->bw_sample_time));
+               memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time,
+                                       sizeof(td->bw_sample_time));
+               memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time,
+                                       sizeof(td->bw_sample_time));
+       }
+
        clear_state = 0;
        while (keep_running(td)) {
                uint64_t verify_bytes;
 
                fio_gettime(&td->start, NULL);
-               memcpy(&td->bw_sample_time, &td->start, sizeof(td->start));
-               memcpy(&td->iops_sample_time, &td->start, sizeof(td->start));
                memcpy(&td->tv_cache, &td->start, sizeof(td->start));
 
-               if (o->ratemin[DDIR_READ] || o->ratemin[DDIR_WRITE] ||
-                               o->ratemin[DDIR_TRIM]) {
-                       memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time,
-                                               sizeof(td->bw_sample_time));
-                       memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time,
-                                               sizeof(td->bw_sample_time));
-                       memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time,
-                                               sizeof(td->bw_sample_time));
-               }
-
                if (clear_state)
-                       clear_io_state(td);
+                       clear_io_state(td, 0);
 
                prune_io_piece_log(td);
 
@@ -1524,19 +1591,22 @@ static void *thread_main(void *data)
 
                clear_state = 1;
 
+               /*
+                * Make sure we've successfully updated the rusage stats
+                * before waiting on the stat mutex. Otherwise we could have
+                * the stat thread holding stat mutex and waiting for
+                * the rusage_sem, which would never get upped because
+                * this thread is waiting for the stat mutex.
+                */
+               check_update_rusage(td);
+
                fio_mutex_down(stat_mutex);
-               if (td_read(td) && td->io_bytes[DDIR_READ]) {
-                       elapsed = mtime_since_now(&td->start);
-                       td->ts.runtime[DDIR_READ] += elapsed;
-               }
-               if (td_write(td) && td->io_bytes[DDIR_WRITE]) {
-                       elapsed = mtime_since_now(&td->start);
-                       td->ts.runtime[DDIR_WRITE] += elapsed;
-               }
-               if (td_trim(td) && td->io_bytes[DDIR_TRIM]) {
-                       elapsed = mtime_since_now(&td->start);
-                       td->ts.runtime[DDIR_TRIM] += elapsed;
-               }
+               if (td_read(td) && td->io_bytes[DDIR_READ])
+                       update_runtime(td, elapsed_us, DDIR_READ);
+               if (td_write(td) && td->io_bytes[DDIR_WRITE])
+                       update_runtime(td, elapsed_us, DDIR_WRITE);
+               if (td_trim(td) && td->io_bytes[DDIR_TRIM])
+                       update_runtime(td, elapsed_us, DDIR_TRIM);
                fio_gettime(&td->start, NULL);
                fio_mutex_up(stat_mutex);
 
@@ -1548,14 +1618,19 @@ static void *thread_main(void *data)
                    (td->io_ops->flags & FIO_UNIDIR))
                        continue;
 
-               clear_io_state(td);
+               clear_io_state(td, 0);
 
                fio_gettime(&td->start, NULL);
 
                do_verify(td, verify_bytes);
 
+               /*
+                * See comment further up for why this is done here.
+                */
+               check_update_rusage(td);
+
                fio_mutex_down(stat_mutex);
-               td->ts.runtime[DDIR_READ] += mtime_since_now(&td->start);
+               update_runtime(td, elapsed_us, DDIR_READ);
                fio_gettime(&td->start, NULL);
                fio_mutex_up(stat_mutex);
 
@@ -1570,21 +1645,16 @@ static void *thread_main(void *data)
        td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
 
        if (td->o.verify_state_save && !(td->flags & TD_F_VSTATE_SAVED) &&
-           (td->o.verify != VERIFY_NONE && td_write(td))) {
-               struct all_io_list *state;
-               size_t sz;
-
-               state = get_all_io_list(td->thread_number, &sz);
-               if (state) {
-                       __verify_save_state(state, "local");
-                       free(state);
-               }
-       }
+           (td->o.verify != VERIFY_NONE && td_write(td)))
+               verify_save_state(td->thread_number);
 
        fio_unpin_memory(td);
 
        fio_writeout_logs(td);
 
+       if (o->io_submit_mode == IO_MODE_OFFLOAD)
+               workqueue_exit(&td->io_wq);
+
        if (td->flags & TD_F_COMPRESS_LOG)
                tp_exit(&td->tp_data);
 
@@ -1826,7 +1896,7 @@ void check_trigger_file(void)
                if (nr_clients)
                        fio_clients_send_trigger(trigger_remote_cmd);
                else {
-                       verify_save_state();
+                       verify_save_state(IO_LIST_ALL);
                        fio_terminate_threads(TERMINATE_ALL);
                        exec_trigger(trigger_cmd);
                }
@@ -1842,11 +1912,12 @@ static int fio_verify_load_state(struct thread_data *td)
 
        if (is_backend) {
                void *data;
+               int ver;
 
                ret = fio_server_get_verify_state(td->o.name,
-                                       td->thread_number - 1, &data);
+                                       td->thread_number - 1, &data, &ver);
                if (!ret)
-                       verify_convert_assign_state(td, data);
+                       verify_convert_assign_state(td, data, ver);
        } else
                ret = verify_load_state(td, "local");
 
@@ -1860,6 +1931,27 @@ static void do_usleep(unsigned int usecs)
        usleep(usecs);
 }
 
+static int check_mount_writes(struct thread_data *td)
+{
+       struct fio_file *f;
+       unsigned int i;
+
+       if (!td_write(td) || td->o.allow_mounted_write)
+               return 0;
+
+       for_each_file(td, f, i) {
+               if (f->filetype != FIO_TYPE_BD)
+                       continue;
+               if (device_is_mounted(f->file_name))
+                       goto mounted;
+       }
+
+       return 0;
+mounted:
+       log_err("fio: %s appears mounted, and 'allow_mounted_write' isn't set. Aborting.\n", f->file_name);
+       return 1;
+}
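check_mount_writes() uses device_is_mounted() from the newly included lib/mountcheck.h (see the top of this diff) to refuse write jobs aimed at a mounted block device unless the user opts in. The opt-in looks like this hypothetical job (/dev/sdb is illustrative):

    ; write to a block device even if it is mounted
    [raw-write]
    filename=/dev/sdb
    rw=write
    allow_mounted_write=1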
+
 /*
  * Main function for kicking off and reaping jobs, as needed.
  */
@@ -1878,13 +1970,15 @@ static void run_threads(void)
 
        nr_thread = nr_process = 0;
        for_each_td(td, i) {
+               if (check_mount_writes(td))
+                       return;
                if (td->o.use_thread)
                        nr_thread++;
                else
                        nr_process++;
        }
 
-       if (output_format == FIO_OUTPUT_NORMAL) {
+       if (output_format & FIO_OUTPUT_NORMAL) {
                log_info("Starting ");
                if (nr_thread)
                        log_info("%d thread%s", nr_thread,
@@ -2226,7 +2320,7 @@ int fio_backend(void)
                        for (i = 0; i < DDIR_RWDIR_CNT; i++) {
                                struct io_log *log = agg_io_log[i];
 
-                               flush_log(log);
+                               flush_log(log, 0);
                                free_log(log);
                        }
                }