fio: allow milliseconds on all time specifiers
[fio.git] / backend.c
index bf9d066e012abc8a44a938fcbf319a68a81d6d64..b92877efd1ca8a167555cd753387df6eaf39fbc5 100644 (file)
--- a/backend.c
+++ b/backend.c
@@ -52,6 +52,7 @@
 #include "server.h"
 #include "lib/getrusage.h"
 #include "idletime.h"
+#include "err.h"
 
 static pthread_t disk_util_thread;
 static struct fio_mutex *disk_thread_mutex;
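The new err.h include brings Linux-kernel-style error-pointer helpers into this file; the hunks below rely on them so get_io_u() can hand back a real errno instead of a bare NULL. A minimal sketch of that encoding convention, assuming err.h follows the usual kernel pattern (a stand-in for illustration, not fio's actual header):

#include <errno.h>

#define MAX_ERRNO       4095

/* stash a small negative errno in the top of the pointer range */
static inline void *ERR_PTR(long error)
{
        return (void *) error;
}

/* recover the errno from an encoded pointer (yields 0 for NULL) */
static inline long PTR_ERR(const void *ptr)
{
        return (long) ptr;
}

/* true if the pointer actually carries an encoded errno */
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long) ptr >= (unsigned long) -MAX_ERRNO;
}

static inline int IS_ERR_OR_NULL(const void *ptr)
{
        return !ptr || IS_ERR(ptr);
}
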
@@ -345,7 +346,7 @@ static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
                return 0;
        if (!td->o.timeout)
                return 0;
-       if (mtime_since(&td->epoch, t) >= td->o.timeout * 1000)
+       if (mtime_since(&td->epoch, t) >= td->o.timeout)
                return 1;
 
        return 0;
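Since timeout is now stored in milliseconds (which is what the commit title refers to), the comparison above drops the old "* 1000" scaling: mtime_since() already reports elapsed time in msec, so the two values compare directly. A minimal sketch of that invariant, using a hypothetical helper name rather than fio's code:

#include <stdint.h>

/*
 * Both arguments are in milliseconds; keeping the old "* 1000" on the
 * option value would make the job run roughly 1000x longer than asked.
 */
static inline int timeout_hit(uint64_t elapsed_msec, uint64_t timeout_msec)
{
        return timeout_msec && elapsed_msec >= timeout_msec;
}

The same unit change explains the start_delay hunk at the end of this file, where its "* 1000" against mtime_since_genesis() is dropped as well.
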
@@ -478,6 +479,12 @@ static void do_verify(struct thread_data *td, uint64_t verify_bytes)
                                break;
 
                        while ((io_u = get_io_u(td)) != NULL) {
+                               if (IS_ERR(io_u)) {
+                                       io_u = NULL;
+                                       ret = FIO_Q_BUSY;
+                                       goto reap;
+                               }
+
                                /*
                                 * We are only interested in the places where
                                 * we wrote or trimmed IOs. Turn those into
@@ -574,6 +581,7 @@ sync_done:
                 * completed io_u's first. Note that we can get BUSY even
                 * without IO queued, if the system is resource starved.
                 */
+reap:
                full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
                if (full || !td->o.iodepth_batch_complete) {
                        min_events = min(td->o.iodepth_batch_complete,
@@ -642,7 +650,7 @@ static uint64_t do_io(struct thread_data *td)
        uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
        unsigned int i;
        int ret = 0;
-       uint64_t bytes_issued = 0;
+       uint64_t total_bytes, bytes_issued = 0;
 
        if (in_ramp_time(td))
                td_set_runstate(td, TD_RAMP);
@@ -651,6 +659,16 @@ static uint64_t do_io(struct thread_data *td)
 
        lat_target_init(td);
 
+       /*
+        * If verify_backlog is enabled, we'll run the verify in this
+        * handler as well. For that case, we may need up to twice the
+        * number of bytes.
+        */
+       total_bytes = td->o.size;
+       if (td->o.verify != VERIFY_NONE &&
+          (td_write(td) && td->o.verify_backlog))
+               total_bytes += td->o.size;
+
        while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
                (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
                td->o.time_based) {
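The comment in this hunk explains why the issue loop may need twice the job size when verify_backlog is active. A worked example of that sizing, with illustrative numbers rather than values from the commit:

#include <stdint.h>

/*
 * Illustrative only: a 1 GiB write job with verify_backlog issues its
 * data once as writes and may issue it again as backlogged verify
 * reads, so the loop's budget grows to 2 GiB.
 */
static uint64_t issue_budget(uint64_t size, int writing, int verifying, int backlog)
{
        uint64_t total = size;          /* e.g. 1 GiB */

        if (verifying && writing && backlog)
                total += size;          /* up to 2 GiB issued */

        return total;
}
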
@@ -678,11 +696,18 @@ static uint64_t do_io(struct thread_data *td)
                if (flow_threshold_exceeded(td))
                        continue;
 
-               if (bytes_issued >= (uint64_t) td->o.size)
+               if (bytes_issued >= total_bytes)
                        break;
 
                io_u = get_io_u(td);
-               if (!io_u) {
+               if (IS_ERR_OR_NULL(io_u)) {
+                       int err = PTR_ERR(io_u);
+
+                       io_u = NULL;
+                       if (err == -EBUSY) {
+                               ret = FIO_Q_BUSY;
+                               goto reap;
+                       }
                        if (td->o.latency_target)
                                goto reap;
                        break;
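The handling above treats -EBUSY from get_io_u() as transient: jump to the reap label, complete some in-flight IO, then retry. Any other case falls back to the existing latency_target/break handling. As a sketch of the producing side of that convention, here is a hypothetical allocator returning errors the same way, reusing the ERR_PTR helpers sketched after the first hunk; the pool type and functions below are not fio's:

#include <errno.h>

struct unit_pool {
        int free_units;         /* units not currently in flight */
        int failed;             /* pool hit an unrecoverable error */
};

/* hypothetical producer following the same return convention as get_io_u() */
static void *acquire_unit(struct unit_pool *pool)
{
        if (pool->failed)
                return ERR_PTR(-EIO);   /* hard error: caller breaks out */
        if (!pool->free_units)
                return ERR_PTR(-EBUSY); /* transient: caller reaps, then retries */

        pool->free_units--;
        return pool;                    /* stand-in for a valid object pointer */
}
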
@@ -714,6 +739,11 @@ static uint64_t do_io(struct thread_data *td)
                else
                        td_set_runstate(td, TD_RUNNING);
 
+               /*
+                * Always log IO before it's issued, so we know the exact
+                * order in which it was issued. The logged unit will track
+                * when the IO has completed.
+                */
                if (td_write(td) && io_u->ddir == DDIR_WRITE &&
                    td->o.do_verify &&
                    td->o.verify != VERIFY_NONE &&
@@ -1109,6 +1139,9 @@ static int keep_running(struct thread_data *td)
                if (diff < td_max_bs(td))
                        return 0;
 
+               if (fio_files_done(td))
+                       return 0;
+
                return 1;
        }
 
@@ -1220,13 +1253,6 @@ static void *thread_main(void *data)
        fio_mutex_down(td->mutex);
        dprint(FD_MUTEX, "done waiting on td->mutex\n");
 
-       /*
-        * the ->mutex mutex is now no longer used, close it to avoid
-        * eating a file descriptor
-        */
-       fio_mutex_remove(td->mutex);
-       td->mutex = NULL;
-
        /*
         * A new gid requires privilege, so we need to do this before setting
         * the uid.
@@ -1506,6 +1532,9 @@ err:
        fio_mutex_remove(td->rusage_sem);
        td->rusage_sem = NULL;
 
+       fio_mutex_remove(td->mutex);
+       td->mutex = NULL;
+
        td_set_runstate(td, TD_EXITED);
        return (void *) (uintptr_t) td->error;
 }
@@ -1754,7 +1783,7 @@ static void run_threads(void)
                        if (td->o.start_delay) {
                                spent = mtime_since_genesis();
 
-                               if (td->o.start_delay * 1000 > spent)
+                               if (td->o.start_delay > spent)
                                        continue;
                        }