mutex: make 0/1 FIO_MUTEX_LOCKED and FIO_MUTEX_UNLOCKED
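
In backend.c the rename shows up in the fio_mutex_init() calls (startup_mutex,
writeout_mutex, and the new disk_thread_mutex): the bare 0/1 initial-state arguments
become named constants. A minimal sketch of what the names amount to (the values follow
from the replacements in the hunks below; the exact header and definition form in fio
are assumptions here):

    #define FIO_MUTEX_LOCKED    0   /* starts held: the first fio_mutex_down() blocks */
    #define FIO_MUTEX_UNLOCKED  1   /* starts free */

A mutex created locked acts as a completion signal: fio_mutex_down() blocks until some
other thread calls fio_mutex_up(), which is how both startup_mutex and the new
disk_thread_mutex are used below.
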
diff --git a/backend.c b/backend.c
index eaa6ea77752e9d82feaf968c1aed5c6fdc20778c..e41e8f1b41fffe64a8dc023523b48e482dd662d0 100644
--- a/backend.c
+++ b/backend.c
@@ -50,6 +50,7 @@
 #include "server.h"
 
 static pthread_t disk_util_thread;
+static struct fio_mutex *disk_thread_mutex;
 static struct fio_mutex *startup_mutex;
 static struct fio_mutex *writeout_mutex;
 static struct flist_head *cgroup_list;
@@ -87,6 +88,11 @@ static void sig_int(int sig)
        }
 }
 
+static void sig_show_status(int sig)
+{
+       show_running_run_stats();
+}
+
 static void set_sig_handlers(void)
 {
        struct sigaction act;
@@ -101,6 +107,11 @@ static void set_sig_handlers(void)
        act.sa_flags = SA_RESTART;
        sigaction(SIGTERM, &act, NULL);
 
+       memset(&act, 0, sizeof(act));
+       act.sa_handler = sig_show_status;
+       act.sa_flags = SA_RESTART;
+       sigaction(SIGUSR1, &act, NULL);
+
        if (is_backend) {
                memset(&act, 0, sizeof(act));
                act.sa_handler = sig_int;
@@ -409,6 +420,9 @@ static void do_verify(struct thread_data *td)
                        }
                }
 
+               if (flow_threshold_exceeded(td))
+                       continue;
+
                io_u = __get_io_u(td);
                if (!io_u)
                        break;
@@ -491,7 +505,10 @@ sync_done:
                if (full || !td->o.iodepth_batch_complete) {
                        min_events = min(td->o.iodepth_batch_complete,
                                         td->cur_depth);
-                       if (full && !min_events && td->o.iodepth_batch_complete != 0)
+                       /*
+                        * if the queue is full, we MUST reap at least 1 event
+                        */
+                       if (full && !min_events)
                                min_events = 1;
 
                        do {
@@ -523,6 +540,20 @@ sync_done:
        dprint(FD_VERIFY, "exiting loop\n");
 }
 
+static int io_bytes_exceeded(struct thread_data *td)
+{
+       unsigned long long bytes;
+
+       if (td_rw(td))
+               bytes = td->this_io_bytes[0] + td->this_io_bytes[1];
+       else if (td_write(td))
+               bytes = td->this_io_bytes[1];
+       else
+               bytes = td->this_io_bytes[0];
+
+       return bytes >= td->o.size;
+}
+
 /*
  * Main IO worker function. It retrieves io_u's to process and queues
  * and reaps them, checking for rate and errors along the way.
@@ -537,9 +568,9 @@ static void do_io(struct thread_data *td)
        else
                td_set_runstate(td, TD_RUNNING);
 
-       while ( (td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
-               (!flist_empty(&td->trim_list)) ||
-               ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) ) {
+       while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
+               (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
+               td->o.time_based) {
                struct timeval comp_time;
                unsigned long bytes_done[2] = { 0, 0 };
                int min_evts = 0;
@@ -560,6 +591,9 @@ static void do_io(struct thread_data *td)
                        }
                }
 
+               if (flow_threshold_exceeded(td))
+                       continue;
+
                io_u = get_io_u(td);
                if (!io_u)
                        break;
@@ -567,11 +601,12 @@ static void do_io(struct thread_data *td)
                ddir = io_u->ddir;
 
                /*
-                * Add verification end_io handler, if asked to verify
-                * a previously written file.
+                * Add verification end_io handler if:
+                *      - Asked to verify (!td_rw(td))
+                *      - Or the io_u is from our verify list (mixed write/ver)
                 */
                if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
-                   !td_rw(td)) {
+                   ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
                        if (td->o.verify_async)
                                io_u->end_io = verify_io_u_async;
                        else
@@ -656,7 +691,10 @@ sync_done:
                if (full || !td->o.iodepth_batch_complete) {
                        min_evts = min(td->o.iodepth_batch_complete,
                                        td->cur_depth);
-                       if (full && !min_evts && td->o.iodepth_batch_complete != 0)
+                       /*
+                        * if the queue is full, we MUST reap at least 1 event
+                        */
+                       if (full && !min_evts)
                                min_evts = 1;
 
                        if (__should_check_rate(td, 0) ||
@@ -756,12 +794,13 @@ static void cleanup_io_u(struct thread_data *td)
 static int init_io_u(struct thread_data *td)
 {
        struct io_u *io_u;
-       unsigned int max_bs;
+       unsigned int max_bs, min_write;
        int cl_align, i, max_units;
        char *p;
 
        max_units = td->o.iodepth;
        max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
+       min_write = td->o.min_bs[DDIR_WRITE];
        td->orig_buffer_size = (unsigned long long) max_bs
                                        * (unsigned long long) max_units;
 
@@ -810,7 +849,7 @@ static int init_io_u(struct thread_data *td)
                        dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);
 
                        if (td_write(td))
-                               io_u_fill_buffer(td, io_u, max_bs);
+                               io_u_fill_buffer(td, io_u, min_write, max_bs);
                        if (td_write(td) && td->o.verify_pattern_bytes) {
                                /*
                                 * Fill the buffer with the pattern if we are
@@ -1017,7 +1056,7 @@ static void *thread_main(void *data)
                }
        }
 
-       if (td->o.cgroup_weight && cgroup_setup(td, cgroup_list, &cgroup_mnt))
+       if (td->o.cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt))
                goto err;
 
        errno = 0;
@@ -1266,6 +1305,7 @@ static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
                        if (errno == ECHILD) {
                                log_err("fio: pid=%d disappeared %d\n",
                                                (int) td->pid, td->runstate);
+                               td->sig = ECHILD;
                                td_set_runstate(td, TD_REAPED);
                                goto reaped;
                        }
@@ -1277,6 +1317,7 @@ static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
                                if (sig != SIGTERM)
                                        log_err("fio: pid=%d, got signal=%d\n",
                                                        (int) td->pid, sig);
+                               td->sig = sig;
                                td_set_runstate(td, TD_REAPED);
                                goto reaped;
                        }
@@ -1548,20 +1589,28 @@ static void run_threads(void)
        fio_unpin_memory();
 }
 
+void wait_for_disk_thread_exit(void)
+{
+       fio_mutex_down(disk_thread_mutex);
+}
+
 static void *disk_thread_main(void *data)
 {
+       int ret = 0;
+
        fio_mutex_up(startup_mutex);
 
-       while (threads) {
+       while (threads && !ret) {
                usleep(DISK_UTIL_MSEC * 1000);
                if (!threads)
                        break;
-               update_io_ticks();
+               ret = update_io_ticks();
 
                if (!is_backend)
                        print_thread_status();
        }
 
+       fio_mutex_up(disk_thread_mutex);
        return NULL;
 }
 
@@ -1569,14 +1618,20 @@ static int create_disk_util_thread(void)
 {
        int ret;
 
+       setup_disk_util();
+
+       disk_thread_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
+
        ret = pthread_create(&disk_util_thread, NULL, disk_thread_main, NULL);
        if (ret) {
+               fio_mutex_remove(disk_thread_mutex);
                log_err("Can't create disk util thread: %s\n", strerror(ret));
                return 1;
        }
 
        ret = pthread_detach(disk_util_thread);
        if (ret) {
+               fio_mutex_remove(disk_thread_mutex);
                log_err("Can't detatch disk util thread: %s\n", strerror(ret));
                return 1;
        }
@@ -1606,10 +1661,10 @@ int fio_backend(void)
                setup_log(&agg_io_log[DDIR_WRITE], 0);
        }
 
-       startup_mutex = fio_mutex_init(0);
+       startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
        if (startup_mutex == NULL)
                return 1;
-       writeout_mutex = fio_mutex_init(1);
+       writeout_mutex = fio_mutex_init(FIO_MUTEX_UNLOCKED);
        if (writeout_mutex == NULL)
                return 1;
 
@@ -1633,11 +1688,13 @@ int fio_backend(void)
        for_each_td(td, i)
                fio_options_free(td);
 
+       free_disk_util();
        cgroup_kill(cgroup_list);
        sfree(cgroup_list);
        sfree(cgroup_mnt);
 
        fio_mutex_remove(startup_mutex);
        fio_mutex_remove(writeout_mutex);
+       fio_mutex_remove(disk_thread_mutex);
        return exit_value;
 }
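
The disk_thread_mutex added above is used as a one-shot completion signal rather than a
lock: it is created in the FIO_MUTEX_LOCKED state, disk_thread_main() ups it once when the
thread is about to exit, and the exported wait_for_disk_thread_exit() downs it so a caller
(not shown in this diff) can block until the disk util thread is really gone. A standalone
illustration of that handshake, using POSIX semaphores in place of fio's mutex API and a
plain int flag in place of fio's global threads list, is below; it is a sketch of the
pattern, not fio code.

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>
    #include <unistd.h>

    static sem_t thread_done;        /* plays the role of disk_thread_mutex */
    static volatile int running = 1; /* stands in for the "while (threads)" check */

    static void *worker_main(void *data)
    {
            while (running)
                    usleep(100 * 1000);     /* periodic work, like update_io_ticks() */

            sem_post(&thread_done);         /* like fio_mutex_up(disk_thread_mutex) */
            return NULL;
    }

    int main(void)
    {
            pthread_t thr;

            sem_init(&thread_done, 0, 0);   /* value 0 == starts "locked" */
            if (pthread_create(&thr, NULL, worker_main, NULL))
                    return 1;
            pthread_detach(thr);

            sleep(1);
            running = 0;                    /* ask the worker to stop */
            sem_wait(&thread_done);         /* like wait_for_disk_thread_exit() */
            sem_destroy(&thread_done);
            printf("disk-util-style worker has exited\n");
            return 0;
    }

The ordering mirrors the patch: the semaphore (mutex) must exist and start in the
locked/zero state before the thread is created, and it is only torn down after the run is
over, which is why fio_backend() removes disk_thread_mutex at the very end.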