int min_evts = 0;
int ret;
+ if (td->flags & TD_F_REGROW_LOGS) {
+ ret = io_u_quiesce(td);
+ regrow_logs(td);
+ return ret;
+ }
+
/*
* if the queue is full, we MUST reap at least 1 event
*/
break;
}
+ if (td->flags & TD_F_REGROW_LOGS)
+ regrow_logs(td);
+
/*
* when doing I/O (not when verifying),
* check for any errors that are to be ignored
struct thread_data *td = fd->td;
struct thread_options *o = &td->o;
struct sk_out *sk_out = fd->sk_out;
- pthread_condattr_t attr;
int clear_state;
int ret;
INIT_FLIST_HEAD(&td->verify_list);
INIT_FLIST_HEAD(&td->trim_list);
INIT_FLIST_HEAD(&td->next_rand_list);
- pthread_mutex_init(&td->io_u_lock, NULL);
td->io_hist_tree = RB_ROOT;
- pthread_condattr_init(&attr);
- pthread_cond_init(&td->verify_cond, &attr);
- pthread_cond_init(&td->free_cond, &attr);
+ ret = mutex_cond_init_pshared(&td->io_u_lock, &td->free_cond);
+ if (ret) {
+ td_verror(td, ret, "mutex_cond_init_pshared");
+ goto err;
+ }
+ ret = cond_init_pshared(&td->verify_cond);
+ if (ret) {
+ td_verror(td, ret, "cond_init_pshared");
+ goto err;
+ }
td_set_runstate(td, TD_INITIALIZED);
dprint(FD_MUTEX, "up startup_mutex\n");
goto err;
}
+ /*
+ * Do this early, we don't want the compress threads to be limited
+ * to the same CPUs as the IO workers. So do this before we set
+ * any potential CPU affinity
+ */
+ if (iolog_compress_init(td, sk_out))
+ goto err;
+
/*
* If we have a gettimeofday() thread, make sure we exclude that
* thread from this job
goto err;
}
- if (iolog_compress_init(td, sk_out))
- goto err;
-
fio_verify_init(td);
if (rate_submit_init(td, sk_out))
break;
}
+ td_set_runstate(td, TD_FINISHING);
+
update_rusage_stat(td);
td->ts.total_run_time = mtime_since_now(&td->epoch);
td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
static void dump_td_info(struct thread_data *td)
{
- log_err("fio: job '%s' hasn't exited in %lu seconds, it appears to "
- "be stuck. Doing forceful exit of this job.\n", td->o.name,
+ log_err("fio: job '%s' (state=%d) hasn't exited in %lu seconds, it "
+ "appears to be stuck. Doing forceful exit of this job.\n",
+ td->o.name, td->runstate,
(unsigned long) time_since_now(&td->terminate_time));
}
* move on.
*/
if (td->terminate &&
+ td->runstate < TD_FSYNCING &&
time_since_now(&td->terminate_time) >= FIO_REAP_TIMEOUT) {
dump_td_info(td);
td_set_runstate(td, TD_REAPED);