backend: add wait_for_completions() helper, use fio_option_is_set()
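
Pull the two nearly identical reap loops in do_verify() and do_io()
into a wait_for_completions() helper.  do_verify() does no rate
tracking, so it passes NULL for the timestamp; do_io() passes
&comp_time, and the helper only calls fio_gettime() when a timestamp
was supplied and rate checking is enabled.

While there, replace the hand-maintained option flags (cpumask_set,
numa_cpumask_set, numa_memmask_set, and the o->ioprio truth test)
with fio_option_is_set() queries, and flatten the nesting in
check_trigger_file().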
diff --git a/backend.c b/backend.c
index 3c2697dec3edb71740cf51d3eddd9cd7fe7b2828..3424a0982b534bc60b8ea037814427e9156b929f 100644
--- a/backend.c
+++ b/backend.c
@@ -418,6 +418,34 @@ static void check_update_rusage(struct thread_data *td)
        }
 }
 
+static int wait_for_completions(struct thread_data *td, struct timeval *time,
+                               uint64_t *bytes_done)
+{
+       const int full = queue_full(td);
+       int min_evts = 0;
+       int ret;
+
+       /*
+        * if the queue is full, we MUST reap at least 1 event
+        */
+       min_evts = min(td->o.iodepth_batch_complete, td->cur_depth);
+       if (full && !min_evts)
+               min_evts = 1;
+
+       if (time && (__should_check_rate(td, DDIR_READ) ||
+           __should_check_rate(td, DDIR_WRITE) ||
+           __should_check_rate(td, DDIR_TRIM)))
+               fio_gettime(time, NULL);
+
+       do {
+               ret = io_u_queued_complete(td, min_evts, bytes_done);
+               if (ret < 0)
+                       break;
+       } while (full && (td->cur_depth > td->o.iodepth_low));
+
+       return ret;
+}
+
 /*
  * The main verify engine. Runs over the writes we previously submitted,
  * reads the blocks back in, and checks the crc/md5 of the data.
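
The helper centralizes the reap policy both call sites duplicated.  A
standalone sketch of that policy (simplified model: reap_events()
stands in for io_u_queued_complete(), and the queue is just a depth
counter), showing how a full queue with iodepth_batch_complete=0 is
drained down to iodepth_low one event at a time:

#include <stdio.h>

struct qstate {
	int cur_depth;		/* I/Os currently in flight */
	int iodepth;		/* queue counts as full at this depth */
	int batch_complete;	/* o.iodepth_batch_complete */
	int iodepth_low;	/* drain a full queue down to this */
};

/* stand-in for io_u_queued_complete(): reap min_evts events */
static int reap_events(struct qstate *q, int min_evts)
{
	q->cur_depth -= min_evts;
	return min_evts;
}

static int wait_for_completions_sketch(struct qstate *q)
{
	const int full = q->cur_depth >= q->iodepth;
	int min_evts, reaped = 0;

	min_evts = q->batch_complete < q->cur_depth ?
			q->batch_complete : q->cur_depth;
	if (full && !min_evts)
		min_evts = 1;	/* full queue: must make progress */

	do {
		reaped += reap_events(q, min_evts);
	} while (full && q->cur_depth > q->iodepth_low);

	return reaped;
}

int main(void)
{
	struct qstate q = { 16, 16, 0, 4 };

	/* prints "reaped 12, depth 4" */
	printf("reaped %d, depth %d\n",
	       wait_for_completions_sketch(&q), q.cur_depth);
	return 0;
}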
@@ -599,27 +627,9 @@ sync_done:
                 */
 reap:
                full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
-               if (full || !td->o.iodepth_batch_complete) {
-                       min_events = min(td->o.iodepth_batch_complete,
-                                        td->cur_depth);
-                       /*
-                        * if the queue is full, we MUST reap at least 1 event
-                        */
-                       if (full && !min_events)
-                               min_events = 1;
+               if (full || !td->o.iodepth_batch_complete)
+                       ret = wait_for_completions(td, NULL, bytes_done);
 
-                       do {
-                               /*
-                                * Reap required number of io units, if any,
-                                * and do the verification on them through
-                                * the callback handler
-                                */
-                               if (io_u_queued_complete(td, min_events, bytes_done) < 0) {
-                                       ret = -1;
-                                       break;
-                               }
-                       } while (full && (td->cur_depth > td->o.iodepth_low));
-               }
                if (ret < 0)
                        break;
        }
@@ -707,7 +717,6 @@ static uint64_t do_io(struct thread_data *td)
                (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
                td->o.time_based) {
                struct timeval comp_time;
-               int min_evts = 0;
                struct io_u *io_u;
                int ret2, full;
                enum fio_ddir ddir;
@@ -871,28 +880,8 @@ sync_done:
                 */
 reap:
                full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
-               if (full || !td->o.iodepth_batch_complete) {
-                       min_evts = min(td->o.iodepth_batch_complete,
-                                       td->cur_depth);
-                       /*
-                        * if the queue is full, we MUST reap at least 1 event
-                        */
-                       if (full && !min_evts)
-                               min_evts = 1;
-
-                       if (__should_check_rate(td, DDIR_READ) ||
-                           __should_check_rate(td, DDIR_WRITE) ||
-                           __should_check_rate(td, DDIR_TRIM))
-                               fio_gettime(&comp_time, NULL);
-
-                       do {
-                               ret = io_u_queued_complete(td, min_evts, bytes_done);
-                               if (ret < 0)
-                                       break;
-
-                       } while (full && (td->cur_depth > td->o.iodepth_low));
-               }
-
+               if (full || !td->o.iodepth_batch_complete)
+                       ret = wait_for_completions(td, &comp_time, bytes_done);
                if (ret < 0)
                        break;
                if (!ddir_rw_sum(bytes_done) && !(td->io_ops->flags & FIO_NOIO))
@@ -1345,7 +1334,7 @@ static void *thread_main(void *data)
         * Set affinity first, in case it has an impact on the memory
         * allocations.
         */
-       if (o->cpumask_set) {
+       if (fio_option_is_set(o, cpumask)) {
                if (o->cpus_allowed_policy == FIO_CPUS_SPLIT) {
                        ret = fio_cpus_split(&o->cpumask, td->thread_number - 1);
                        if (!ret) {
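
Background for this and the following hunks: each *_set flag was
bookkeeping maintained by hand next to the option it shadowed, while
fio_option_is_set() derives the answer from the parsed options.  A
minimal sketch of that general technique, with hypothetical names
rather than fio's actual macro: record the offset of every field the
parser fills in, then test membership via offsetof().

#include <stddef.h>
#include <stdbool.h>

struct opts {
	unsigned long cpumask;
	int ioprio;
	int ioprio_class;
};

/* offsets of the fields the option parser has filled in */
static size_t set_offs[64];
static int nr_set;

static void mark_set(size_t off)
{
	set_offs[nr_set++] = off;
}

static bool off_is_set(size_t off)
{
	for (int i = 0; i < nr_set; i++)
		if (set_offs[i] == off)
			return true;
	return false;
}

#define opt_is_set(field)	off_is_set(offsetof(struct opts, field))

int main(void)
{
	/* parser saw a priority class option, but no priority value */
	mark_set(offsetof(struct opts, ioprio_class));

	return !(opt_is_set(ioprio_class) && !opt_is_set(ioprio));
}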
@@ -1364,7 +1353,8 @@ static void *thread_main(void *data)
 
 #ifdef CONFIG_LIBNUMA
        /* numa node setup */
-       if (o->numa_cpumask_set || o->numa_memmask_set) {
+       if (fio_option_is_set(o, numa_cpunodes) ||
+           fio_option_is_set(o, numa_memnodes)) {
                struct bitmask *mask;
 
                if (numa_available() < 0) {
@@ -1372,7 +1362,7 @@ static void *thread_main(void *data)
                        goto err;
                }
 
-               if (o->numa_cpumask_set) {
+               if (fio_option_is_set(o, numa_cpunodes)) {
                        mask = numa_parse_nodestring(o->numa_cpunodes);
                        ret = numa_run_on_node_mask(mask);
                        numa_free_nodemask(mask);
@@ -1383,8 +1373,7 @@ static void *thread_main(void *data)
                        }
                }
 
-               if (o->numa_memmask_set) {
-
+               if (fio_option_is_set(o, numa_memnodes)) {
                        mask = NULL;
                        if (o->numa_memnodes)
                                mask = numa_parse_nodestring(o->numa_memnodes);
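
For reference, the libnuma calls in the context above compose like
this (standalone sketch with a hypothetical helper name; build with
-lnuma):

#include <numa.h>

/* restrict the calling thread to the CPUs of the given nodes,
 * e.g. "0-1", as done for o->numa_cpunodes */
static int pin_to_nodes(char *nodestr)
{
	struct bitmask *mask;
	int ret;

	if (numa_available() < 0)
		return -1;

	mask = numa_parse_nodestring(nodestr);
	if (!mask)
		return -1;

	ret = numa_run_on_node_mask(mask);
	numa_free_nodemask(mask);
	return ret;
}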
@@ -1430,7 +1419,8 @@ static void *thread_main(void *data)
        if (o->verify_async && verify_async_init(td))
                goto err;
 
-       if (o->ioprio) {
+       if (fio_option_is_set(o, ioprio) ||
+           fio_option_is_set(o, ioprio_class)) {
                ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio);
                if (ret == -1) {
                        td_verror(td, errno, "ioprio_set");
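
Note the behavior change here: a priority class given without a
priority value (so o->ioprio stays 0) now reaches ioprio_set() too,
where the old o->ioprio truth test skipped it.  For reference, the
encoding the raw Linux syscall expects (standard ABI, shown with a
hypothetical helper; fio's own ioprio_set() wrapper takes class and
data separately, as above):

#include <sys/syscall.h>
#include <unistd.h>

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))

#define IOPRIO_WHO_PROCESS	1
#define IOPRIO_CLASS_BE		2

/* best-effort class, level 4, for the calling process */
int set_be_prio4(void)
{
	return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		       IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
}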
@@ -1589,7 +1579,7 @@ err:
        cgroup_shutdown(td, &cgroup_mnt);
        verify_free_state(td);
 
-       if (o->cpumask_set) {
+       if (fio_option_is_set(o, cpumask)) {
                ret = fio_cpuset_exit(&o->cpumask);
                if (ret)
                        td_verror(td, ret, "fio_cpuset_exit");
@@ -1804,10 +1794,9 @@ void exec_trigger(const char *cmd)
 void check_trigger_file(void)
 {
        if (__check_trigger_file() || trigger_timedout()) {
-               if (nr_clients) {
-                       if (trigger_remote_cmd)
-                               fio_clients_send_trigger(trigger_remote_cmd);
-               } else {
+               if (nr_clients)
+                       fio_clients_send_trigger(trigger_remote_cmd);
+               else {
                        verify_save_state();
                        fio_terminate_threads(TERMINATE_ALL);
                        exec_trigger(trigger_cmd);