Free disk util on exit, not on stats display
index eaa6ea77752e9d82feaf968c1aed5c6fdc20778c..4c271ba92aba4e71493f53d1ba62c6c0da177240 100644
--- a/backend.c
+++ b/backend.c
@@ -409,6 +409,9 @@ static void do_verify(struct thread_data *td)
                        }
                }
 
+               if (flow_threshold_exceeded(td))
+                       continue;
+
                io_u = __get_io_u(td);
                if (!io_u)
                        break;
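
The new early bail-out ties the verify loop into fio's flow mechanism (the flow= option), where paired jobs throttle each other through a shared counter. The real logic lives in flow.c; the following is only a toy model of the threshold test, with all internals assumed:

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Toy model of a shared flow counter: paired jobs push the balance
 * in opposite directions as they issue I/O, and a job backs off
 * (the "continue" above) once the balance tilts past the threshold
 * in its favour. Everything here is assumed, not fio's real code.
 */
struct flow_toy {
	atomic_long value;	/* net balance between the paired jobs */
};

static bool flow_exceeded_toy(struct flow_toy *flow, long weight,
			      long threshold)
{
	long v = atomic_load(&flow->value);

	/* a positive-weight job waits while the balance already
	 * tilts past the threshold in its direction */
	if (weight > 0)
		return v > threshold;
	return v < -threshold;
}
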
@@ -491,7 +494,10 @@ sync_done:
                if (full || !td->o.iodepth_batch_complete) {
                        min_events = min(td->o.iodepth_batch_complete,
                                         td->cur_depth);
-                       if (full && !min_events && td->o.iodepth_batch_complete != 0)
+                       /*
+                        * if the queue is full, we MUST reap at least 1 event
+                        */
+                       if (full && !min_events)
                                min_events = 1;
 
                        do {
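
The rewritten condition encodes a progress guarantee: with the device queue full, no new io_u can be issued, so at least one completion must be reaped even when iodepth_batch_complete=0 asked for non-blocking reaping. A condensed, self-contained restatement of the decision (the helper name and the blocking-reap assumption are mine):

#include <stdbool.h>

/*
 * Condensed restatement of the reap decision above. Assumes the
 * reaping call blocks until at least min_events completions arrive;
 * with the queue full and min_events == 0, nothing would ever
 * drain, so one event must be forced.
 */
static unsigned int decide_min_events(bool full, unsigned int batch_complete,
				      unsigned int cur_depth)
{
	unsigned int min_events = batch_complete < cur_depth ?
					batch_complete : cur_depth;

	if (full && !min_events)
		min_events = 1;	/* MUST reap to make progress */

	return min_events;
}
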
@@ -523,6 +529,20 @@ sync_done:
        dprint(FD_VERIFY, "exiting loop\n");
 }
 
+static int io_bytes_exceeded(struct thread_data *td)
+{
+       unsigned long long bytes;
+
+       if (td_rw(td))
+               bytes = td->this_io_bytes[0] + td->this_io_bytes[1];
+       else if (td_write(td))
+               bytes = td->this_io_bytes[1];
+       else
+               bytes = td->this_io_bytes[0];
+
+       return bytes >= td->o.size;
+}
+
 /*
  * Main IO worker function. It retrieves io_u's to process and queues
  * and reaps them, checking for rate and errors along the way.
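
io_bytes_exceeded() makes the end-of-job test direction-aware; in this version of fio, this_io_bytes[0] counts reads and this_io_bytes[1] counts writes. The practical effect is easiest to see with a write job that also issues verify reads, as in this toy reconstruction (struct and field layout simplified):

#include <stdio.h>

struct td_toy {
	unsigned long long this_io_bytes[2];	/* [0] read, [1] write */
	unsigned long long size;
	int rw, write;		/* stand-ins for td_rw()/td_write() */
};

static int io_bytes_exceeded_toy(struct td_toy *td)
{
	unsigned long long bytes;

	if (td->rw)
		bytes = td->this_io_bytes[0] + td->this_io_bytes[1];
	else if (td->write)
		bytes = td->this_io_bytes[1];
	else
		bytes = td->this_io_bytes[0];

	return bytes >= td->size;
}

int main(void)
{
	/* write-only job: 4 MiB of verify reads done, 2 MiB written,
	 * 4 MiB budget; the old unconditional read+write sum would
	 * wrongly have declared this job finished */
	struct td_toy td = { { 4ULL << 20, 2ULL << 20 }, 4ULL << 20, 0, 1 };

	printf("exceeded: %d\n", io_bytes_exceeded_toy(&td));	/* 0 */
	return 0;
}
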
@@ -537,9 +557,9 @@ static void do_io(struct thread_data *td)
        else
                td_set_runstate(td, TD_RUNNING);
 
-       while ( (td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
-               (!flist_empty(&td->trim_list)) ||
-               ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) ) {
+       while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
+               (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
+               td->o.time_based) {
                struct timeval comp_time;
                unsigned long bytes_done[2] = { 0, 0 };
                int min_evts = 0;
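
Besides switching to the helper, the loop now also keeps running whenever time_based is set, so a timed job keeps issuing I/O past its byte budget; the runtime limit is presumably enforced elsewhere by fio's timer machinery. Condensed as a predicate (parameter names are stand-ins, not fio's real fields):

/*
 * The new while-condition, condensed. A time_based job never lets
 * the byte budget terminate the loop.
 */
static int keep_running(int reading_iolog, int iolog_pending,
			int trims_pending, int bytes_exceeded,
			int time_based)
{
	if (reading_iolog && iolog_pending)
		return 1;
	if (trims_pending)
		return 1;
	if (time_based)
		return 1;
	return !bytes_exceeded;
}
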
@@ -560,6 +580,9 @@ static void do_io(struct thread_data *td)
                        }
                }
 
+               if (flow_threshold_exceeded(td))
+                       continue;
+
                io_u = get_io_u(td);
                if (!io_u)
                        break;
@@ -567,11 +590,12 @@ static void do_io(struct thread_data *td)
                ddir = io_u->ddir;
 
                /*
-                * Add verification end_io handler, if asked to verify
-                * a previously written file.
+                * Add verification end_io handler if:
+                *      - Asked to verify (!td_rw(td))
+                *      - Or the io_u is from our verify list (mixed write/ver)
                 */
                if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
-                   !td_rw(td)) {
+                   ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
                        if (td->o.verify_async)
                                io_u->end_io = verify_io_u_async;
                        else
@@ -656,7 +680,10 @@ sync_done:
                if (full || !td->o.iodepth_batch_complete) {
                        min_evts = min(td->o.iodepth_batch_complete,
                                        td->cur_depth);
-                       if (full && !min_evts && td->o.iodepth_batch_complete != 0)
+                       /*
+                        * if the queue is full, we MUST reap at least 1 event
+                        */
+                       if (full && !min_evts)
                                min_evts = 1;
 
                        if (__should_check_rate(td, 0) ||
@@ -756,12 +783,13 @@ static void cleanup_io_u(struct thread_data *td)
 static int init_io_u(struct thread_data *td)
 {
        struct io_u *io_u;
-       unsigned int max_bs;
+       unsigned int max_bs, min_write;
        int cl_align, i, max_units;
        char *p;
 
        max_units = td->o.iodepth;
        max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
+       min_write = td->o.min_bs[DDIR_WRITE];
        td->orig_buffer_size = (unsigned long long) max_bs
                                        * (unsigned long long) max_units;
 
@@ -810,7 +838,7 @@ static int init_io_u(struct thread_data *td)
                        dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);
 
                        if (td_write(td))
-                               io_u_fill_buffer(td, io_u, max_bs);
+                               io_u_fill_buffer(td, io_u, min_write, max_bs);
                        if (td_write(td) && td->o.verify_pattern_bytes) {
                                /*
                                 * Fill the buffer with the pattern if we are
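
Passing min_write down means the buffer can be filled per smallest-write-sized segment rather than as one opaque max_bs blob, presumably so that every min_bs slice later carved out of it is a complete, independently patterned segment. A hedged sketch of that fill strategy (fio's real io_u_fill_buffer uses its own RNG and pattern handling; rand() is only a stand-in):

#include <stdlib.h>

/*
 * Assumed illustration of segment-wise filling: lay the buffer down
 * in min_write-sized pieces so each minimum-block-size slice starts
 * on a fully initialised, self-contained segment.
 */
static void fill_segmented(char *buf, unsigned int min_write,
			   unsigned int max_bs)
{
	unsigned int off, i, len;

	if (!min_write || min_write > max_bs)
		min_write = max_bs;

	for (off = 0; off < max_bs; off += min_write) {
		len = max_bs - off < min_write ? max_bs - off : min_write;
		/* seed per segment so every min_write block gets its
		 * own reproducible pattern */
		srand(off);
		for (i = 0; i < len; i++)
			buf[off + i] = (char) rand();
	}
}
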
@@ -1266,6 +1294,7 @@ static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
                        if (errno == ECHILD) {
                                log_err("fio: pid=%d disappeared %d\n",
                                                (int) td->pid, td->runstate);
+                               td->sig = ECHILD;
                                td_set_runstate(td, TD_REAPED);
                                goto reaped;
                        }
@@ -1277,6 +1306,7 @@ static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
                                if (sig != SIGTERM)
                                        log_err("fio: pid=%d, got signal=%d\n",
                                                        (int) td->pid, sig);
+                               td->sig = sig;
                                td_set_runstate(td, TD_REAPED);
                                goto reaped;
                        }
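
Both reap paths now record why a thread went away: ECHILD when the pid vanished, otherwise the terminating signal, stored in the new td->sig field for later reporting. A minimal model of the signal branch (toy struct, real wait-status macros; only the `sig` field name comes from the diff):

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>

struct td_sig_toy {
	int pid;
	int sig;	/* the new field: why the thread died */
};

static void reap_one(struct td_sig_toy *td, int status)
{
	if (WIFSIGNALED(status)) {
		int sig = WTERMSIG(status);

		if (sig != SIGTERM)
			fprintf(stderr, "fio: pid=%d, got signal=%d\n",
				td->pid, sig);
		td->sig = sig;	/* kept for exit-time reporting */
	}
}
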
@@ -1633,6 +1663,7 @@ int fio_backend(void)
        for_each_td(td, i)
                fio_options_free(td);
 
+       free_disk_util();
        cgroup_kill(cgroup_list);
        sfree(cgroup_list);
        sfree(cgroup_mnt);
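
Finally, the change the subject line refers to: disk utilization state is now freed once at backend exit, alongside the cgroup teardown, instead of in the stats display path, so intermediate stat output can no longer run after (or race with) the free. A stub-level sketch of the resulting exit ordering (only free_disk_util is a real fio name here; the other helpers are placeholders):

#include <stdio.h>

static void free_disk_util(void)  { puts("disk util state freed"); }
static void cgroup_teardown(void) { puts("cgroups killed, shared mem freed"); }

int main(void)
{
	/* exit path: stats may have been printed many times before
	 * this point; disk-util state stays valid until right here */
	free_disk_util();
	cgroup_teardown();
	return 0;
}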