int min_evts = 0;
int ret;
- if (td->flags & TD_F_REGROW_LOGS) {
- ret = io_u_quiesce(td);
- regrow_logs(td);
- return ret;
- }
+ if (td->flags & TD_F_REGROW_LOGS)
+ return io_u_quiesce(td);
/*
 * if the queue is full, we MUST reap at least 1 event
 */
return number_ios >= (td->o.number_ios * td->loops);
}
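Note: dropping the explicit regrow_logs() call in the TD_F_REGROW_LOGS path above is only safe on the assumption that io_u_quiesce() (or something it calls) now regrows the logs itself once pending I/O has drained; that assumption is what lets the branch collapse to a plain early return.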
-static bool io_issue_bytes_exceeded(struct thread_data *td)
+static bool io_bytes_exceeded(struct thread_data *td, uint64_t *this_bytes)
{
unsigned long long bytes, limit;
if (td_rw(td))
- bytes = td->io_issue_bytes[DDIR_READ] + td->io_issue_bytes[DDIR_WRITE];
+ bytes = this_bytes[DDIR_READ] + this_bytes[DDIR_WRITE];
else if (td_write(td))
- bytes = td->io_issue_bytes[DDIR_WRITE];
+ bytes = this_bytes[DDIR_WRITE];
else if (td_read(td))
- bytes = td->io_issue_bytes[DDIR_READ];
+ bytes = this_bytes[DDIR_READ];
else
- bytes = td->io_issue_bytes[DDIR_TRIM];
+ bytes = this_bytes[DDIR_TRIM];
if (td->o.io_limit)
	limit = td->o.io_limit;
else
	limit = td->o.size;

limit *= td->loops;

return bytes >= limit || exceeds_number_ios(td);
}
-static bool io_complete_bytes_exceeded(struct thread_data *td)
+static bool io_issue_bytes_exceeded(struct thread_data *td)
{
- unsigned long long bytes, limit;
-
- if (td_rw(td))
- bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE];
- else if (td_write(td))
- bytes = td->this_io_bytes[DDIR_WRITE];
- else if (td_read(td))
- bytes = td->this_io_bytes[DDIR_READ];
- else
- bytes = td->this_io_bytes[DDIR_TRIM];
-
- if (td->o.io_limit)
- limit = td->o.io_limit;
- else
- limit = td->o.size;
+ return io_bytes_exceeded(td, td->io_issue_bytes);
+}
- limit *= td->loops;
- return bytes >= limit || exceeds_number_ios(td);
+static bool io_complete_bytes_exceeded(struct thread_data *td)
+{
+ return io_bytes_exceeded(td, td->this_io_bytes);
}
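
The shape of the refactor above: one helper takes a pointer to a per-direction byte array, and the old entry points become thin wrappers that pass either the issue-side or the completion-side counters. A minimal standalone sketch of that pattern follows (illustrative names only, not fio's; the real logic also distinguishes read/write/trim-only jobs):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { DIR_READ, DIR_WRITE, DIR_TRIM, DIR_CNT };

    struct job {
    	uint64_t issued[DIR_CNT];	/* bytes issued, per direction */
    	uint64_t completed[DIR_CNT];	/* bytes completed, per direction */
    	uint64_t limit;			/* byte budget for the job */
    };

    /* shared logic: compare a caller-chosen byte array against the limit */
    static bool bytes_exceeded(const struct job *j, const uint64_t *bytes)
    {
    	return bytes[DIR_READ] + bytes[DIR_WRITE] + bytes[DIR_TRIM] >= j->limit;
    }

    static bool issue_bytes_exceeded(const struct job *j)
    {
    	return bytes_exceeded(j, j->issued);
    }

    static bool complete_bytes_exceeded(const struct job *j)
    {
    	return bytes_exceeded(j, j->completed);
    }

    int main(void)
    {
    	struct job j = { .issued = { 4096, 8192, 0 }, .completed = { 4096, 0, 0 }, .limit = 8192 };

    	printf("issued over budget: %d, completed over budget: %d\n",
    	       issue_bytes_exceeded(&j), complete_bytes_exceeded(&j));
    	return 0;
    }
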
/*
struct thread_data *td = fd->td;
struct thread_options *o = &td->o;
struct sk_out *sk_out = fd->sk_out;
+ int deadlock_loop_cnt;
int clear_state;
int ret;
fio_getrusage(&td->ru_start);
memcpy(&td->bw_sample_time, &td->epoch, sizeof(td->epoch));
memcpy(&td->iops_sample_time, &td->epoch, sizeof(td->epoch));
+ memcpy(&td->ss.prev_time, &td->epoch, sizeof(td->epoch));
if (o->ratemin[DDIR_READ] || o->ratemin[DDIR_WRITE] ||
o->ratemin[DDIR_TRIM]) {
}
}
+ /*
+ * If we took too long to shut down, the main thread could
+ * already consider us reaped/exited. If that happens, break
+ * out and clean up.
+ */
+ if (td->runstate >= TD_EXITED)
+ break;
+
clear_state = 1;
/*
* the rusage_sem, which would never get upped because
* this thread is waiting for the stat mutex.
*/
+ deadlock_loop_cnt = 0;
do {
check_update_rusage(td);
if (!fio_mutex_down_trylock(stat_mutex))
break;
usleep(1000);
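+ /*
+  * Roughly 5 seconds (5000 iterations of the 1ms sleep above) without
+  * getting stat_mutex is treated as a deadlock rather than contention.
+  */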
+ if (deadlock_loop_cnt++ > 5000) {
+ log_err("fio seems to be stuck grabbing stat_mutex, forcibly exiting\n");
+ td->error = EDEADLK;
+ goto err;
+ }
} while (1);
if (td_read(td) && td->io_bytes[DDIR_READ])
/*
* Run over the job map and reap the threads that have exited, if any.
*/
-static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
- unsigned int *m_rate)
+static void reap_threads(unsigned int *nr_running, uint64_t *t_rate,
+ uint64_t *m_rate)
{
struct thread_data *td;
unsigned int cputhreads, realthreads, pending;
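
Widening m_rate and t_rate to uint64_t matters because these accumulate the per-job minimum and target rates across every running thread: assuming the rates are tracked in bytes per second (an assumption here, not verified against the tree), a 32-bit sum already overflows at about 4.29 GB/s aggregate, e.g. 16 jobs rate-limited to 300 MB/s each.
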
static void run_threads(struct sk_out *sk_out)
{
struct thread_data *td;
- unsigned int i, todo, nr_running, m_rate, t_rate, nr_started;
+ unsigned int i, todo, nr_running, nr_started;
+ uint64_t m_rate, t_rate;
uint64_t spent;
if (fio_gtod_offload && fio_start_gtod_thread())
}
for_each_td(td, i) {
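+ /*
+  * Steady state detection (ss.dur) keeps per-thread iops/bw sample
+  * buffers; free them here. Both are freed under a single NULL check,
+  * on the assumption that they are only ever allocated together.
+  */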
+ if (td->ss.dur) {
+ if (td->ss.iops_data != NULL) {
+ free(td->ss.iops_data);
+ free(td->ss.bw_data);
+ }
+ }
fio_options_free(td);
if (td->rusage_sem) {
fio_mutex_remove(td->rusage_sem);