Allow offload with FAKEIO engines
[fio.git] / backend.c
diff --git a/backend.c b/backend.c
index f519728c2131f073f55869ed765118fbbeafe8b9..05453ae2dea1fd0105ce49414fb49bfdf7b383ff 100644
--- a/backend.c
+++ b/backend.c
@@ -66,7 +66,11 @@ unsigned int stat_number = 0;
 int shm_id = 0;
 int temp_stall_ts;
 unsigned long done_secs = 0;
+#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
+pthread_mutex_t overlap_check = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
+#else
 pthread_mutex_t overlap_check = PTHREAD_MUTEX_INITIALIZER;
+#endif
 
 #define JOB_START_TIMEOUT      (5 * 1000)
 
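PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP is a glibc extension that statically initializes a mutex of type PTHREAD_MUTEX_ERRORCHECK, so relocking a held mutex or unlocking one the calling thread does not own fails with an error code rather than deadlocking or silently succeeding. That is what gives the assert(res == 0) checks added later in this patch something to catch. A minimal standalone sketch of the behaviour, not fio code (build with -pthread; it falls back to a default mutex where the macro is unavailable):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    int main(void)
    {
    #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
        static pthread_mutex_t m = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
        int res;

        /* Unlocking a mutex this thread does not hold is reported as EPERM. */
        res = pthread_mutex_unlock(&m);
        printf("unlock of unowned mutex: %d (EPERM is %d)\n", res, EPERM);

        /* Relocking a held mutex returns EDEADLK instead of hanging. */
        pthread_mutex_lock(&m);
        res = pthread_mutex_lock(&m);
        printf("relock of held mutex: %d (EDEADLK is %d)\n", res, EDEADLK);

        pthread_mutex_unlock(&m);
    #else
        puts("error-checking static initializer not available on this platform");
    #endif
        return 0;
    }
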
@@ -219,12 +223,10 @@ static bool check_min_rate(struct thread_data *td, struct timespec *now)
 {
        bool ret = false;
 
-       if (td->bytes_done[DDIR_READ])
-               ret |= __check_min_rate(td, now, DDIR_READ);
-       if (td->bytes_done[DDIR_WRITE])
-               ret |= __check_min_rate(td, now, DDIR_WRITE);
-       if (td->bytes_done[DDIR_TRIM])
-               ret |= __check_min_rate(td, now, DDIR_TRIM);
+       for_each_rw_ddir(ddir) {
+               if (td->bytes_done[ddir])
+                       ret |= __check_min_rate(td, now, ddir);
+       }
 
        return ret;
 }
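
for_each_rw_ddir() comes from fio's io_ddir.h and is, conceptually, a plain for loop over the read/write/trim data directions. The reconstruction below is only illustrative (the real enum has more members and the constants are defined slightly differently), but it shows the shape of the loop this hunk switches check_min_rate() to:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for fio's io_ddir.h definitions. */
    enum fio_ddir { DDIR_READ = 0, DDIR_WRITE, DDIR_TRIM, DDIR_RWDIR_CNT };

    #define for_each_rw_ddir(ddir) \
        for (enum fio_ddir ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)

    int main(void)
    {
        uint64_t bytes_done[DDIR_RWDIR_CNT] = { 4096, 8192, 0 };

        /* Same shape as the refactored check_min_rate() loop above. */
        for_each_rw_ddir(ddir) {
            if (bytes_done[ddir])
                printf("ddir %d moved %llu bytes\n", (int)ddir,
                       (unsigned long long)bytes_done[ddir]);
        }
        return 0;
    }
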
@@ -1006,12 +1008,6 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
                if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
                    ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
 
-                       if (!td->o.verify_pattern_bytes) {
-                               io_u->rand_seed = __rand(&td->verify_state);
-                               if (sizeof(int) != sizeof(long *))
-                                       io_u->rand_seed *= __rand(&td->verify_state);
-                       }
-
                        if (verify_state_should_stop(td, io_u)) {
                                put_io_u(td, io_u);
                                break;
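
The block removed here widened a draw from __rand(), which may only be 32 bits, into a 64-bit verify seed by multiplying in a second draw whenever pointers are wider than int. The same idiom appears elsewhere on fio's io_u setup path, which is presumably why seeding again just before the verify_state_should_stop() check is redundant. A self-contained sketch of the widening idiom, with the hypothetical rand32() and make_verify_seed() standing in for fio's helpers:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for fio's __rand(); assume it yields at most 32 random bits. */
    static uint32_t rand32(void)
    {
        return (uint32_t)rand();
    }

    /* Widen a 32-bit draw into a 64-bit seed on LP64 platforms, mirroring the
     * "sizeof(int) != sizeof(long *)" test in the removed block. */
    static uint64_t make_verify_seed(void)
    {
        uint64_t seed = rand32();

        if (sizeof(int) != sizeof(long *))
            seed *= rand32();

        return seed;
    }

    int main(void)
    {
        srand(1234);
        printf("seed: %llu\n", (unsigned long long)make_verify_seed());
        return 0;
    }
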
@@ -1541,7 +1537,7 @@ static void *thread_main(void *data)
        uint64_t bytes_done[DDIR_RWDIR_CNT];
        int deadlock_loop_cnt;
        bool clear_state;
-       int ret;
+       int res, ret;
 
        sk_out_assign(sk_out);
        free(fd);
@@ -1866,17 +1862,21 @@ static void *thread_main(void *data)
         * offload mode so that we don't clean up this job while
         * another thread is checking its io_u's for overlap
         */
-       if (td_offload_overlap(td))
-               pthread_mutex_lock(&overlap_check);
+       if (td_offload_overlap(td)) {
+               int res = pthread_mutex_lock(&overlap_check);
+               assert(res == 0);
+       }
        td_set_runstate(td, TD_FINISHING);
-       if (td_offload_overlap(td))
-               pthread_mutex_unlock(&overlap_check);
+       if (td_offload_overlap(td)) {
+               res = pthread_mutex_unlock(&overlap_check);
+               assert(res == 0);
+       }
 
        update_rusage_stat(td);
        td->ts.total_run_time = mtime_since_now(&td->epoch);
-       td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
-       td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
-       td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
+       for_each_rw_ddir(ddir) {
+               td->ts.io_bytes[ddir] = td->io_bytes[ddir];
+       }
 
        if (td->o.verify_state_save && !(td->flags & TD_F_VSTATE_SAVED) &&
            (td->o.verify != VERIFY_NONE && td_write(td)))
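
Per the comment in the hunk above, an offload worker doing serialize_overlap checking walks another job's in-flight io_u's while holding overlap_check, so the exiting job takes the same mutex around the switch to TD_FINISHING; the new code additionally asserts that the lock and unlock calls succeed, which is meaningful now that the mutex is of the error-checking type. A hypothetical condensed sketch of the two sides of that hand-off (placeholder names, not fio source):

    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t overlap_check = PTHREAD_MUTEX_INITIALIZER;

    /* Offload-worker side (placeholder for the real scan): inspect another
     * job's in-flight io_u's while holding overlap_check. */
    static bool scan_for_overlap(void)
    {
        int res = pthread_mutex_lock(&overlap_check);
        assert(res == 0);

        bool overlap = false;   /* ... walk other threads' io_u lists ... */

        res = pthread_mutex_unlock(&overlap_check);
        assert(res == 0);
        return overlap;
    }

    /* Exiting-job side, mirroring the hunk above: the runstate change to
     * TD_FINISHING happens while no worker can be mid-scan. */
    static void finish_job(void)
    {
        int res = pthread_mutex_lock(&overlap_check);
        assert(res == 0);
        /* td_set_runstate(td, TD_FINISHING); */
        res = pthread_mutex_unlock(&overlap_check);
        assert(res == 0);
    }

    int main(void)
    {
        printf("overlap: %d\n", scan_for_overlap());
        finish_job();
        return 0;
    }
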
@@ -2042,6 +2042,7 @@ reaped:
 
                done_secs += mtime_since_now(&td->epoch) / 1000;
                profile_td_exit(td);
+               flow_exit_job(td);
        }
 
        if (*nr_running == cputhreads && !pending && realthreads)
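
flow_exit_job() is the teardown counterpart of flow_init_job() in fio's flow.c; calling it from the reap path groups the flow release with the rest of the per-job cleanup (profile_td_exit() and friends). The sketch below is only a generic illustration of the get/put reference-counting pattern such cleanup typically pairs with, using hypothetical names rather than fio's actual flow API:

    #include <assert.h>
    #include <stdio.h>

    /* Generic refcounted-resource illustration (hypothetical names). */
    struct flow {
        int id;
        unsigned int refs;
    };

    static struct flow *flow_get(struct flow *f)
    {
        f->refs++;
        return f;
    }

    static void flow_put(struct flow *f)
    {
        assert(f->refs > 0);
        if (--f->refs == 0)
            printf("flow %d released\n", f->id);
    }

    int main(void)
    {
        struct flow shared = { .id = 1, .refs = 0 };

        struct flow *a = flow_get(&shared);     /* job init */
        struct flow *b = flow_get(&shared);

        flow_put(a);                            /* job reaped: exit path */
        flow_put(b);                            /* last user frees the flow */
        return 0;
    }
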