client/backend: fix incomplete output_format checks
[fio.git] / io_u.c
diff --git a/io_u.c b/io_u.c
index d80ef983c15972e4dbafc00fb68319826f16b3f2..6dda5790a7f1d494fd46758ecc9b3b6406e4a11a 100644 (file)
--- a/io_u.c
+++ b/io_u.c
@@ -568,49 +568,47 @@ void io_u_quiesce(struct thread_data *td)
 static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 {
        enum fio_ddir odir = ddir ^ 1;
-       long usec;
+       long usec, now;
 
        assert(ddir_rw(ddir));
+       now = utime_since_now(&td->start);
 
-       if (td->rate_pending_usleep[ddir] <= 0)
+       /*
+        * if rate_next_io_time is in the past, need to catch up to rate
+        */
+       if (td->rate_next_io_time[ddir] <= now)
                return ddir;
 
        /*
-        * We have too much pending sleep in this direction. See if we
+        * We are ahead of rate in this direction. See if we
         * should switch.
         */
        if (td_rw(td) && td->o.rwmix[odir]) {
                /*
-                * Other direction does not have too much pending, switch
+                * Other direction is behind rate, switch
                 */
-               if (td->rate_pending_usleep[odir] < 100000)
+               if (td->rate_next_io_time[odir] <= now)
                        return odir;
 
                /*
-                * Both directions have pending sleep. Sleep the minimum time
-                * and deduct from both.
+                * Both directions are ahead of rate. Sleep the minimum
+                * time and switch direction if necessary.
                 */
-               if (td->rate_pending_usleep[ddir] <=
-                       td->rate_pending_usleep[odir]) {
-                       usec = td->rate_pending_usleep[ddir];
+               if (td->rate_next_io_time[ddir] <=
+                       td->rate_next_io_time[odir]) {
+                       usec = td->rate_next_io_time[ddir] - now;
                } else {
-                       usec = td->rate_pending_usleep[odir];
+                       usec = td->rate_next_io_time[odir] - now;
                        ddir = odir;
                }
        } else
-               usec = td->rate_pending_usleep[ddir];
+               usec = td->rate_next_io_time[ddir] - now;
 
        if (td->o.io_submit_mode == IO_MODE_INLINE)
                io_u_quiesce(td);
 
        usec = usec_sleep(td, usec);
 
-       td->rate_pending_usleep[ddir] -= usec;
-
-       odir = ddir ^ 1;
-       if (td_rw(td) && __should_check_rate(td, odir))
-               td->rate_pending_usleep[odir] -= usec;
-
        return ddir;
 }
 
@@ -1580,6 +1578,13 @@ static void __io_u_log_error(struct thread_data *td, struct io_u *io_u)
                io_ddir_name(io_u->ddir),
                io_u->offset, io_u->xfer_buflen);
 
+       if (td->io_ops->errdetails) {
+               char *err = td->io_ops->errdetails(io_u);
+
+               log_err("fio: %s\n", err);
+               free(err);
+       }
+
        if (!td->error)
                td_verror(td, io_u->error, "io_u error");
 }
@@ -1656,18 +1661,6 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
        }
 }
 
-static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
-{
-       uint64_t secs, remainder, bps, bytes;
-
-       assert(!(td->flags & TD_F_CHILD));
-       bytes = td->this_io_bytes[ddir];
-       bps = td->rate_bps[ddir];
-       secs = bytes / bps;
-       remainder = bytes % bps;
-       return remainder * 1000000 / bps + secs * 1000000;
-}
-
 static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
                         struct io_completion_data *icd)
 {
@@ -1709,7 +1702,6 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
 
        if (!io_u->error && ddir_rw(ddir)) {
                unsigned int bytes = io_u->buflen - io_u->resid;
-               const enum fio_ddir oddir = ddir ^ 1;
                int ret;
 
                td->io_blocks[ddir]++;
@@ -1738,27 +1730,9 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
                }
 
                if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
-                                          td->runstate == TD_VERIFYING)) {
-                       struct thread_data *__td = td;
-
+                                          td->runstate == TD_VERIFYING))
                        account_io_completion(td, io_u, icd, ddir, bytes);
 
-                       if (td->parent)
-                               __td = td->parent;
-
-                       if (__should_check_rate(__td, ddir)) {
-                               __td->rate_pending_usleep[ddir] =
-                                       (usec_for_io(__td, ddir) -
-                                        utime_since_now(&__td->start));
-                       }
-                       if (ddir != DDIR_TRIM &&
-                           __should_check_rate(__td, oddir)) {
-                               __td->rate_pending_usleep[oddir] =
-                                       (usec_for_io(__td, oddir) -
-                                        utime_since_now(&__td->start));
-                       }
-               }
-
                icd->bytes_done[ddir] += bytes;
 
                if (io_u->end_io) {
@@ -1862,7 +1836,9 @@ int io_u_queued_complete(struct thread_data *td, int min_evts)
        else if (min_evts > td->cur_depth)
                min_evts = td->cur_depth;
 
-       ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
+       /* td_io_getevents() fixes up min and max if they are set
+        * incorrectly, so no validation is needed here */
+       ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete_max, tvp);
        if (ret < 0) {
                td_verror(td, -ret, "td_io_getevents");
                return ret;