Define SIGUSR1 to tell fio to dump run stats while continuing to run
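
With this patch, sending SIGUSR1 to a running fio process (for example with kill -USR1 <pid>) dumps the run stats gathered so far without stopping the job. For reference, here is a minimal standalone sketch of the sigaction() registration pattern the patch adds to set_sig_handlers(); it is not fio code, and the handler body only prints a message where fio's sig_show_status() calls show_running_run_stats():

/*
 * Minimal standalone sketch of the SIGUSR1 setup this patch adds.
 * Not fio code: the handler here is a stand-in for the real stats dump.
 */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <string.h>
#include <unistd.h>

static void sig_show_status(int sig)
{
	/* write(2) is async-signal-safe; stands in for show_running_run_stats() */
	static const char msg[] = "SIGUSR1: dumping run stats, run continues\n";

	(void) sig;
	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
}

int main(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_show_status;
	act.sa_flags = SA_RESTART;	/* restart interrupted syscalls so I/O keeps going */
	sigaction(SIGUSR1, &act, NULL);

	/* stand-in for fio's main loop; send SIGUSR1 from another shell to test */
	for (;;)
		pause();
}

SA_RESTART matters here because the process is typically blocked in I/O syscalls; without it, a stats request could surface as a spurious EINTR in the I/O path instead of being handled transparently.
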
index 52791040ff21384d283b2a09de3ff9718e19d866..f4bc553b0a5ee9dab880685f1844221975d4c64f 100644
--- a/backend.c
+++ b/backend.c
@@ -87,6 +87,11 @@ static void sig_int(int sig)
        }
 }
 
+static void sig_show_status(int sig)
+{
+       show_running_run_stats();
+}
+
 static void set_sig_handlers(void)
 {
        struct sigaction act;
@@ -101,6 +106,11 @@ static void set_sig_handlers(void)
        act.sa_flags = SA_RESTART;
        sigaction(SIGTERM, &act, NULL);
 
+       memset(&act, 0, sizeof(act));
+       act.sa_handler = sig_show_status;
+       act.sa_flags = SA_RESTART;
+       sigaction(SIGUSR1, &act, NULL);
+
        if (is_backend) {
                memset(&act, 0, sizeof(act));
                act.sa_handler = sig_int;
@@ -494,7 +504,10 @@ sync_done:
                if (full || !td->o.iodepth_batch_complete) {
                        min_events = min(td->o.iodepth_batch_complete,
                                         td->cur_depth);
-                       if (full && !min_events && td->o.iodepth_batch_complete != 0)
+                       /*
+                        * if the queue is full, we MUST reap at least 1 event
+                        */
+                       if (full && !min_events)
                                min_events = 1;
 
                        do {
@@ -526,6 +539,20 @@ sync_done:
        dprint(FD_VERIFY, "exiting loop\n");
 }
 
+static int io_bytes_exceeded(struct thread_data *td)
+{
+       unsigned long long bytes;
+
+       if (td_rw(td))
+               bytes = td->this_io_bytes[0] + td->this_io_bytes[1];
+       else if (td_write(td))
+               bytes = td->this_io_bytes[1];
+       else
+               bytes = td->this_io_bytes[0];
+
+       return bytes >= td->o.size;
+}
+
 /*
  * Main IO worker function. It retrieves io_u's to process and queues
  * and reaps them, checking for rate and errors along the way.
@@ -540,9 +567,9 @@ static void do_io(struct thread_data *td)
        else
                td_set_runstate(td, TD_RUNNING);
 
-       while ( (td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
-               (!flist_empty(&td->trim_list)) ||
-               ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) ) {
+       while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
+               (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
+               td->o.time_based) {
                struct timeval comp_time;
                unsigned long bytes_done[2] = { 0, 0 };
                int min_evts = 0;
@@ -573,11 +600,12 @@ static void do_io(struct thread_data *td)
                ddir = io_u->ddir;
 
                /*
-                * Add verification end_io handler, if asked to verify
-                * a previously written file.
+                * Add verification end_io handler if:
+                *      - Asked to verify (!td_rw(td))
+                *      - Or the io_u is from our verify list (mixed write/ver)
                 */
                if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
-                   !td_rw(td)) {
+                   ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
                        if (td->o.verify_async)
                                io_u->end_io = verify_io_u_async;
                        else
@@ -662,7 +690,10 @@ sync_done:
                if (full || !td->o.iodepth_batch_complete) {
                        min_evts = min(td->o.iodepth_batch_complete,
                                        td->cur_depth);
-                       if (full && !min_evts && td->o.iodepth_batch_complete != 0)
+                       /*
+                        * if the queue is full, we MUST reap at least 1 event
+                        */
+                       if (full && !min_evts)
                                min_evts = 1;
 
                        if (__should_check_rate(td, 0) ||
@@ -762,12 +793,13 @@ static void cleanup_io_u(struct thread_data *td)
 static int init_io_u(struct thread_data *td)
 {
        struct io_u *io_u;
-       unsigned int max_bs;
+       unsigned int max_bs, min_write;
        int cl_align, i, max_units;
        char *p;
 
        max_units = td->o.iodepth;
        max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
+       min_write = td->o.min_bs[DDIR_WRITE];
        td->orig_buffer_size = (unsigned long long) max_bs
                                        * (unsigned long long) max_units;
 
@@ -816,7 +848,7 @@ static int init_io_u(struct thread_data *td)
                        dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);
 
                        if (td_write(td))
-                               io_u_fill_buffer(td, io_u, max_bs);
+                               io_u_fill_buffer(td, io_u, min_write, max_bs);
                        if (td_write(td) && td->o.verify_pattern_bytes) {
                                /*
                                 * Fill the buffer with the pattern if we are
@@ -1272,6 +1304,7 @@ static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
                        if (errno == ECHILD) {
                                log_err("fio: pid=%d disappeared %d\n",
                                                (int) td->pid, td->runstate);
+                               td->sig = ECHILD;
                                td_set_runstate(td, TD_REAPED);
                                goto reaped;
                        }
@@ -1283,6 +1316,7 @@ static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
                                if (sig != SIGTERM)
                                        log_err("fio: pid=%d, got signal=%d\n",
                                                        (int) td->pid, sig);
+                               td->sig = sig;
                                td_set_runstate(td, TD_REAPED);
                                goto reaped;
                        }
@@ -1639,6 +1673,7 @@ int fio_backend(void)
        for_each_td(td, i)
                fio_options_free(td);
 
+       free_disk_util();
        cgroup_kill(cgroup_list);
        sfree(cgroup_list);
        sfree(cgroup_mnt);
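
Both "we MUST reap at least 1 event" hunks above encode the same reasoning: when the submission queue is full, no further io_u can be queued until something completes, so the reap step has to wait for at least one completion even if iodepth_batch_complete would otherwise allow reaping zero. A small illustrative helper (reap_min() and its parameter names are made up for this sketch, not fio API) makes the calculation explicit:

/*
 * Illustrative only, not fio code: pick the minimum number of completions
 * to wait for, from the user's batch setting and the current queue state.
 */
#include <stdio.h>

static unsigned int reap_min(unsigned int batch_complete,
			     unsigned int cur_depth, int queue_full)
{
	/* never wait for more events than are currently in flight */
	unsigned int min_events = batch_complete < cur_depth ?
					batch_complete : cur_depth;

	/*
	 * if the queue is full, we MUST reap at least 1 event; otherwise no
	 * slot frees up and the submit side cannot make progress
	 */
	if (queue_full && !min_events)
		min_events = 1;

	return min_events;
}

int main(void)
{
	/* batch_complete == 0 means "reap whatever is done", unless we are full */
	printf("%u\n", reap_min(0, 16, 0));	/* 0: nothing we must wait for */
	printf("%u\n", reap_min(0, 16, 1));	/* 1: queue full, must reap one */
	printf("%u\n", reap_min(4, 2, 0));	/* 2: capped at current depth */
	return 0;
}
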