return !td->o.iodepth_batch_complete_min &&
!td->o.iodepth_batch_complete_max;
}
+/*
+ * Unlink (delete) every regular file in this thread's fio_file list.
+ */
+static int unlink_all_files(struct thread_data *td)
+{
+ struct fio_file *f;
+ unsigned int i;
+ int ret = 0;
+
+ for_each_file(td, f, i) {
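+ /* only regular files can be unlinked; skip devices and pipes */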
+ if (f->filetype != FIO_TYPE_FILE)
+ continue;
+ ret = td_io_unlink_file(td, f);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ td_verror(td, ret, "unlink_all_files");
+
+ return ret;
+}
/*
* The main verify engine. Runs over the writes we previously submitted,
break;
while ((io_u = get_io_u(td)) != NULL) {
- if (IS_ERR(io_u)) {
+ if (IS_ERR_OR_NULL(io_u)) {
io_u = NULL;
ret = FIO_Q_BUSY;
goto reap;
continue;
} else if (io_u->ddir == DDIR_TRIM) {
io_u->ddir = DDIR_READ;
- io_u_set(io_u, IO_U_F_TRIMMED);
+ io_u_set(td, io_u, IO_U_F_TRIMMED);
break;
} else if (io_u->ddir == DDIR_WRITE) {
io_u->ddir = DDIR_READ;
return number_ios >= (td->o.number_ios * td->loops);
}
-static bool io_issue_bytes_exceeded(struct thread_data *td)
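+/*
+ * Return true if the given per-ddir byte counters have reached the
+ * io_limit (or size) limit, scaled by loops, or the number_ios limit.
+ */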
+static bool io_bytes_exceeded(struct thread_data *td, uint64_t *this_bytes)
{
unsigned long long bytes, limit;
if (td_rw(td))
- bytes = td->io_issue_bytes[DDIR_READ] + td->io_issue_bytes[DDIR_WRITE];
+ bytes = this_bytes[DDIR_READ] + this_bytes[DDIR_WRITE];
else if (td_write(td))
- bytes = td->io_issue_bytes[DDIR_WRITE];
+ bytes = this_bytes[DDIR_WRITE];
else if (td_read(td))
- bytes = td->io_issue_bytes[DDIR_READ];
+ bytes = this_bytes[DDIR_READ];
else
- bytes = td->io_issue_bytes[DDIR_TRIM];
+ bytes = this_bytes[DDIR_TRIM];
if (td->o.io_limit)
limit = td->o.io_limit;
return bytes >= limit || exceeds_number_ios(td);
}
-static bool io_complete_bytes_exceeded(struct thread_data *td)
+static bool io_issue_bytes_exceeded(struct thread_data *td)
{
- unsigned long long bytes, limit;
-
- if (td_rw(td))
- bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE];
- else if (td_write(td))
- bytes = td->this_io_bytes[DDIR_WRITE];
- else if (td_read(td))
- bytes = td->this_io_bytes[DDIR_READ];
- else
- bytes = td->this_io_bytes[DDIR_TRIM];
-
- if (td->o.io_limit)
- limit = td->o.io_limit;
- else
- limit = td->o.size;
+ return io_bytes_exceeded(td, td->io_issue_bytes);
+}
- limit *= td->loops;
- return bytes >= limit || exceeds_number_ios(td);
+static bool io_complete_bytes_exceeded(struct thread_data *td)
+{
+ return io_bytes_exceeded(td, td->this_io_bytes);
}
/*
if (ret < 0)
break;
if (!ddir_rw_sum(td->bytes_done) &&
- !(td->io_ops->flags & FIO_NOIO))
+ !td_ioengine_flagged(td, FIO_NOIO))
continue;
if (!in_ramp_time(td) && should_check_rate(td)) {
td->orig_buffer_size = (unsigned long long) max_bs
* (unsigned long long) max_units;
- if ((td->io_ops->flags & FIO_NOIO) || !(td_read(td) || td_write(td)))
+ if (td_ioengine_flagged(td, FIO_NOIO) || !(td_read(td) || td_write(td)))
data_xfer = 0;
err = 0;
* lucky and the allocator gives us an aligned address.
*/
if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
- (td->io_ops->flags & FIO_RAWIO))
+ td_ioengine_flagged(td, FIO_RAWIO))
td->orig_buffer_size += page_mask + td->o.mem_align;
if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
return 1;
if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
- (td->io_ops->flags & FIO_RAWIO))
+ td_ioengine_flagged(td, FIO_RAWIO))
p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align;
else
p = td->orig_buffer;
static int switch_ioscheduler(struct thread_data *td)
{
+#ifdef FIO_HAVE_IOSCHED_SWITCH
char tmp[256], tmp2[128];
FILE *f;
int ret;
- if (td->io_ops->flags & FIO_DISKLESSIO)
+ if (td_ioengine_flagged(td, FIO_DISKLESSIO))
return 0;
sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);
*/
tmp[strlen(tmp) - 1] = '\0';
+ /*
+ * A write to a "none" scheduler entry does not fail, so check the
+ * read-back result here to detect that the scheduler is not tunable.
+ */
+ if (!strcmp(tmp, "none")) {
+ log_err("fio: io scheduler is not tunable\n");
+ fclose(f);
+ return 0;
+ }
sprintf(tmp2, "[%s]", td->o.ioscheduler);
if (!strstr(tmp, tmp2)) {
fclose(f);
return 0;
+#else
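+ /* io scheduler switching is not supported on this platform */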
+ return 0;
+#endif
}
static bool keep_running(struct thread_data *td)
break;
io_u = get_io_u(td);
- if (!io_u)
+ if (IS_ERR_OR_NULL(io_u))
break;
- io_u_set(io_u, IO_U_F_FLIGHT);
+ io_u_set(td, io_u, IO_U_F_FLIGHT);
io_u->error = 0;
io_u->resid = 0;
if (ddir_rw(acct_ddir(io_u)))
struct thread_data *td = fd->td;
struct thread_options *o = &td->o;
struct sk_out *sk_out = fd->sk_out;
+ int deadlock_loop_cnt;
int clear_state;
int ret;
if (rate_submit_init(td, sk_out))
goto err;
- fio_gettime(&td->epoch, NULL);
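+ /* record the job epoch; also capture Unix wall-clock time when log_unix_epoch is set */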
+ set_epoch_time(td, o->log_unix_epoch);
fio_getrusage(&td->ru_start);
memcpy(&td->bw_sample_time, &td->epoch, sizeof(td->epoch));
memcpy(&td->iops_sample_time, &td->epoch, sizeof(td->epoch));
fio_gettime(&td->start, NULL);
memcpy(&td->tv_cache, &td->start, sizeof(td->start));
- if (clear_state)
+ if (clear_state) {
clear_io_state(td, 0);
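+ /* unlink_each_loop: delete this job's files before starting the next loop */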
+ if (o->unlink_each_loop && unlink_all_files(td))
+ break;
+ }
+
prune_io_piece_log(td);
if (td->o.verify_only && (td_write(td) || td_rw(td)))
}
}
+ /*
+ * If we took too long to shut down, the main thread could
+ * already consider us reaped/exited. If that happens, break
+ * out and clean up.
+ */
+ if (td->runstate >= TD_EXITED)
+ break;
+
clear_state = 1;
/*
* the rusage_sem, which would never get upped because
* this thread is waiting for the stat mutex.
*/
- check_update_rusage(td);
- fio_mutex_down(stat_mutex);
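+ /*
+ * Grab stat_mutex without blocking indefinitely: keep servicing
+ * rusage updates between trylock attempts, and give up after
+ * roughly five seconds instead of deadlocking.
+ */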
+ deadlock_loop_cnt = 0;
+ do {
+ check_update_rusage(td);
+ if (!fio_mutex_down_trylock(stat_mutex))
+ break;
+ usleep(1000);
+ if (deadlock_loop_cnt++ > 5000) {
+ log_err("fio seems to be stuck grabbing stat_mutex, forcibly exiting\n");
+ td->error = EDEADLK;
+ goto err;
+ }
+ } while (1);
if (td_read(td) && td->io_bytes[DDIR_READ])
update_runtime(td, elapsed_us, DDIR_READ);
if (td_write(td) && td->io_bytes[DDIR_WRITE])
if (!o->do_verify ||
o->verify == VERIFY_NONE ||
- (td->io_ops->flags & FIO_UNIDIR))
+ td_ioengine_flagged(td, FIO_UNIDIR))
continue;
clear_io_state(td, 0);