engines/io_uring: use the atomic load acquire instead of a barrier
diff --git a/backend.c b/backend.c
index 0d1f473442e368cd82443ae0352dba5878a2b072..05453ae2dea1fd0105ce49414fb49bfdf7b383ff 100644
--- a/backend.c
+++ b/backend.c
@@ -66,7 +66,11 @@ unsigned int stat_number = 0;
 int shm_id = 0;
 int temp_stall_ts;
 unsigned long done_secs = 0;
+#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
+pthread_mutex_t overlap_check = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
+#else
 pthread_mutex_t overlap_check = PTHREAD_MUTEX_INITIALIZER;
+#endif
 
 #define JOB_START_TIMEOUT      (5 * 1000)
 
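For reference, PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP is a glibc/NPTL extension that statically initializes a mutex of type PTHREAD_MUTEX_ERRORCHECK, so relocking from the owning thread or unlocking a mutex the caller does not hold is reported as an error code instead of deadlocking or being undefined. A minimal standalone sketch of that behaviour (not fio code), guarded the same way in case the initializer is unavailable:

#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
static pthread_mutex_t lock = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
#endif

int main(void)
{
	assert(pthread_mutex_lock(&lock) == 0);
	/* With the errorcheck type, a second lock by the same thread
	 * returns EDEADLK instead of deadlocking. */
	printf("relock -> %d (EDEADLK=%d)\n", pthread_mutex_lock(&lock), EDEADLK);
	assert(pthread_mutex_unlock(&lock) == 0);
	/* Unlocking a mutex we no longer own is reported as EPERM. */
	printf("double unlock -> %d (EPERM=%d)\n", pthread_mutex_unlock(&lock), EPERM);
	return 0;
}
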
@@ -134,8 +138,8 @@ static bool __check_min_rate(struct thread_data *td, struct timespec *now,
        unsigned long long bytes = 0;
        unsigned long iops = 0;
        unsigned long spent;
-       unsigned long rate;
-       unsigned int ratemin = 0;
+       unsigned long long rate;
+       unsigned long long ratemin = 0;
        unsigned int rate_iops = 0;
        unsigned int rate_iops_min = 0;
 
@@ -169,7 +173,7 @@ static bool __check_min_rate(struct thread_data *td, struct timespec *now,
                         * check bandwidth specified rate
                         */
                        if (bytes < td->rate_bytes[ddir]) {
-                               log_err("%s: rate_min=%uB/s not met, only transferred %lluB\n",
+                               log_err("%s: rate_min=%lluB/s not met, only transferred %lluB\n",
                                        td->o.name, ratemin, bytes);
                                return true;
                        } else {
@@ -180,7 +184,7 @@ static bool __check_min_rate(struct thread_data *td, struct timespec *now,
 
                                if (rate < ratemin ||
                                    bytes < td->rate_bytes[ddir]) {
-                                       log_err("%s: rate_min=%uB/s not met, got %luB/s\n",
+                                       log_err("%s: rate_min=%lluB/s not met, got %lluB/s\n",
                                                td->o.name, ratemin, rate);
                                        return true;
                                }
@@ -201,7 +205,7 @@ static bool __check_min_rate(struct thread_data *td, struct timespec *now,
 
                                if (rate < rate_iops_min ||
                                    iops < td->rate_blocks[ddir]) {
-                                       log_err("%s: rate_iops_min=%u not met, got %lu IOPS\n",
+                                       log_err("%s: rate_iops_min=%u not met, got %llu IOPS\n",
                                                td->o.name, rate_iops_min, rate);
                                        return true;
                                }
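The hunks above widen rate and ratemin to unsigned long long and switch the log_err() conversions to %llu to match: passing a 64-bit value where %u or a 32-bit %lu is expected is undefined behaviour in a variadic call, and the narrower types cannot represent rate limits above UINT_MAX in the first place. A small standalone illustration (not fio code):

#include <stdio.h>

int main(void)
{
	/* A 5 GB/s minimum rate does not fit in a 32-bit unsigned int... */
	unsigned long long ratemin = 5ULL * 1000 * 1000 * 1000;
	unsigned int truncated = (unsigned int)ratemin;

	/* ...so the narrow type silently under-reports the configured limit.
	 * %llu matches unsigned long long on every platform. */
	printf("wide: %lluB/s  narrow: %uB/s\n", ratemin, truncated);
	return 0;
}
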
@@ -219,12 +223,10 @@ static bool check_min_rate(struct thread_data *td, struct timespec *now)
 {
        bool ret = false;
 
-       if (td->bytes_done[DDIR_READ])
-               ret |= __check_min_rate(td, now, DDIR_READ);
-       if (td->bytes_done[DDIR_WRITE])
-               ret |= __check_min_rate(td, now, DDIR_WRITE);
-       if (td->bytes_done[DDIR_TRIM])
-               ret |= __check_min_rate(td, now, DDIR_TRIM);
+       for_each_rw_ddir(ddir) {
+               if (td->bytes_done[ddir])
+                       ret |= __check_min_rate(td, now, ddir);
+       }
 
        return ret;
 }
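for_each_rw_ddir() replaces the per-direction copy/paste with a loop over the read, write and trim data directions. The macro comes from fio's io_ddir.h; its definition is presumably along these lines (a sketch with the enum mirrored here for illustration, not the verbatim fio source):

enum fio_ddir { DDIR_READ = 0, DDIR_WRITE, DDIR_TRIM, DDIR_RWDIR_CNT };

/* Presumed shape of the helper: iterate READ, WRITE, TRIM in order. */
#define for_each_rw_ddir(ddir) \
	for (enum fio_ddir ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
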
@@ -237,15 +239,10 @@ static void cleanup_pending_aio(struct thread_data *td)
 {
        int r;
 
-       if (td->error)
-               return;
-
        /*
         * get immediately available events, if any
         */
        r = io_u_queued_complete(td, 0);
-       if (r < 0)
-               return;
 
        /*
         * now cancel remaining active events
@@ -1011,12 +1008,6 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
                if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
                    ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
 
-                       if (!td->o.verify_pattern_bytes) {
-                               io_u->rand_seed = __rand(&td->verify_state);
-                               if (sizeof(int) != sizeof(long *))
-                                       io_u->rand_seed *= __rand(&td->verify_state);
-                       }
-
                        if (verify_state_should_stop(td, io_u)) {
                                put_io_u(td, io_u);
                                break;
@@ -1546,7 +1537,7 @@ static void *thread_main(void *data)
        uint64_t bytes_done[DDIR_RWDIR_CNT];
        int deadlock_loop_cnt;
        bool clear_state;
-       int ret;
+       int res, ret;
 
        sk_out_assign(sk_out);
        free(fd);
@@ -1871,17 +1862,21 @@ static void *thread_main(void *data)
         * offload mode so that we don't clean up this job while
         * another thread is checking its io_u's for overlap
         */
-       if (td_offload_overlap(td))
-               pthread_mutex_lock(&overlap_check);
+       if (td_offload_overlap(td)) {
+               int res = pthread_mutex_lock(&overlap_check);
+               assert(res == 0);
+       }
        td_set_runstate(td, TD_FINISHING);
-       if (td_offload_overlap(td))
-               pthread_mutex_unlock(&overlap_check);
+       if (td_offload_overlap(td)) {
+               res = pthread_mutex_unlock(&overlap_check);
+               assert(res == 0);
+       }
 
        update_rusage_stat(td);
        td->ts.total_run_time = mtime_since_now(&td->epoch);
-       td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
-       td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
-       td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
+       for_each_rw_ddir(ddir) {
+               td->ts.io_bytes[ddir] = td->io_bytes[ddir];
+       }
 
        if (td->o.verify_state_save && !(td->flags & TD_F_VSTATE_SAVED) &&
            (td->o.verify != VERIFY_NONE && td_write(td)))
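With overlap_check now able to be an error-checking mutex (first hunk), the return values asserted on around td_set_runstate(TD_FINISHING) actually carry information, e.g. EDEADLK or EPERM on misuse. If the pattern spread further it could be factored into checked wrappers; a hypothetical sketch, not part of fio:

#include <assert.h>
#include <pthread.h>

/* Hypothetical helpers (not fio API): treat any pthread_mutex error
 * as a programming bug rather than a recoverable condition. */
static inline void mutex_lock_checked(pthread_mutex_t *m)
{
	int res = pthread_mutex_lock(m);

	assert(res == 0);
	(void)res;	/* silence unused-variable warnings under NDEBUG */
}

static inline void mutex_unlock_checked(pthread_mutex_t *m)
{
	int res = pthread_mutex_unlock(m);

	assert(res == 0);
	(void)res;
}
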
@@ -2047,6 +2042,7 @@ reaped:
 
                done_secs += mtime_since_now(&td->epoch) / 1000;
                profile_td_exit(td);
+               flow_exit_job(td);
        }
 
        if (*nr_running == cputhreads && !pending && realthreads)
@@ -2120,8 +2116,16 @@ static int fio_verify_load_state(struct thread_data *td)
                                        td->thread_number - 1, &data);
                if (!ret)
                        verify_assign_state(td, data);
-       } else
-               ret = verify_load_state(td, "local");
+       } else {
+               char prefix[PATH_MAX];
+
+               if (aux_path)
+                       sprintf(prefix, "%s%clocal", aux_path,
+                                       FIO_OS_PATH_SEPARATOR);
+               else
+                       strcpy(prefix, "local");
+               ret = verify_load_state(td, prefix);
+       }
 
        return ret;
 }
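The local branch now honours aux_path when loading verify state: the "local" prefix is rooted at aux_path with the platform path separator. A standalone sketch of the same prefix construction, assuming a platform that defines PATH_MAX and using snprintf for bounds checking (the globals and macro below are stand-ins, not the fio definitions):

#include <limits.h>
#include <stdio.h>

/* Hypothetical stand-ins for fio's aux_path global and separator macro. */
#define FIO_OS_PATH_SEPARATOR '/'
static const char *aux_path = "/tmp/fio-aux";

static void build_state_prefix(char *prefix, size_t len)
{
	if (aux_path)
		snprintf(prefix, len, "%s%clocal", aux_path,
			 FIO_OS_PATH_SEPARATOR);
	else
		snprintf(prefix, len, "local");
}

int main(void)
{
	char prefix[PATH_MAX];

	build_state_prefix(prefix, sizeof(prefix));
	printf("verify state prefix: %s\n", prefix);
	return 0;
}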