+ if (td->o.verify != VERIFY_NONE) {
+ td->last_write_comp = scalloc(max_units, sizeof(uint64_t));
+ if (!td->last_write_comp) {
+ log_err("fio: failed to alloc write comp data\n");
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
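+/*
+ * Switch the block device IO scheduler by writing the requested name to
+ * the sysfs queue/scheduler file, then read it back to confirm the
+ * kernel marked it as the active scheduler (shown in brackets).
+ */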
+static int switch_ioscheduler(struct thread_data *td)
+{
+ char tmp[256], tmp2[128];
+ FILE *f;
+ int ret;
+
+ if (td->io_ops->flags & FIO_DISKLESSIO)
+ return 0;
+
+ sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);
+
+ f = fopen(tmp, "r+");
+ if (!f) {
+ if (errno == ENOENT) {
+ log_err("fio: os or kernel doesn't support IO scheduler"
+ " switching\n");
+ return 0;
+ }
+ td_verror(td, errno, "fopen iosched");
+ return 1;
+ }
+
+ /*
+ * Set io scheduler.
+ */
+ ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
+ if (ferror(f) || ret != 1) {
+ td_verror(td, errno, "fwrite");
+ fclose(f);
+ return 1;
+ }
+
+ rewind(f);
+
+ /*
+ * Read back and check that the selected scheduler is now the default.
+ */
+ memset(tmp, 0, sizeof(tmp));
+ ret = fread(tmp, 1, sizeof(tmp) - 1, f);
+ if (ferror(f)) {
+ td_verror(td, errno, "fread");
+ fclose(f);
+ return 1;
+ }
+ /*
+ * Either a list of io schedulers or "none\n" is expected. Strip the
+ * trailing newline before matching.
+ */
+ if (strlen(tmp))
+ tmp[strlen(tmp) - 1] = '\0';
+
+ sprintf(tmp2, "[%s]", td->o.ioscheduler);
+ if (!strstr(tmp, tmp2)) {
+ log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
+ td_verror(td, EINVAL, "iosched_switch");
+ fclose(f);
+ return 1;
+ }
+
+ fclose(f);
+ return 0;
+}
+
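+/*
+ * Decide whether the main IO loop should run again: time based jobs
+ * keep going, an explicit loop count is consumed one pass at a time,
+ * and otherwise we continue only while the io_limit/size budget still
+ * has room for at least one maximum-sized IO on an unfinished file.
+ */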
+static bool keep_running(struct thread_data *td)
+{
+ unsigned long long limit;
+
+ if (td->done)
+ return false;
+ if (td->o.time_based)
+ return true;
+ if (td->o.loops) {
+ td->o.loops--;
+ return true;
+ }
+ if (exceeds_number_ios(td))
+ return false;
+
+ if (td->o.io_limit)
+ limit = td->o.io_limit;
+ else
+ limit = td->o.size;
+
+ if (limit != -1ULL && ddir_rw_sum(td->io_bytes) < limit) {
+ uint64_t diff;
+
+ /*
+ * If the difference is less than the maximum IO size, we
+ * are done.
+ */
+ diff = limit - ddir_rw_sum(td->io_bytes);
+ if (diff < td_max_bs(td))
+ return false;
+
+ if (fio_files_done(td))
+ return false;
+
+ return true;
+ }
+
+ return false;
+}
+
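+/*
+ * Run the given shell command through system(), redirecting its stdout
+ * and stderr to "<jobname>.<mode>.txt".
+ */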
+static int exec_string(struct thread_options *o, const char *string, const char *mode)
+{
+ size_t newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
+ int ret;
+ char *str;
+
+ str = malloc(newlen);
+ if (!str)
+ return -1;
+ sprintf(str, "%s &> %s.%s.txt", string, o->name, mode);
+
+ log_info("%s : Saving output of %s in %s.%s.txt\n", o->name, mode, o->name, mode);
+ ret = system(str);
+ if (ret == -1)
+ log_err("fio: exec of cmd <%s> failed\n", str);
+
+ free(str);
+ return ret;
+}
+
+/*
+ * Dry run to compute correct state of numberio for verification.
+ */
+static uint64_t do_dry_run(struct thread_data *td)
+{
+ td_set_runstate(td, TD_RUNNING);
+
+ while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
+ (!flist_empty(&td->trim_list)) || !io_complete_bytes_exceeded(td)) {
+ struct io_u *io_u;
+ int ret;
+
+ if (td->terminate || td->done)
+ break;
+
+ io_u = get_io_u(td);
+ if (!io_u)
+ break;
+
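+ /*
+ * Account the io_u as if it had been issued, and log write pieces
+ * for later verification, but complete it without ever handing it
+ * to the IO engine.
+ */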
+ io_u_set(io_u, IO_U_F_FLIGHT);
+ io_u->error = 0;
+ io_u->resid = 0;
+ if (ddir_rw(acct_ddir(io_u)))
+ td->io_issues[acct_ddir(io_u)]++;
+ if (ddir_rw(io_u->ddir)) {
+ io_u_mark_depth(td, 1);
+ td->ts.total_io_u[io_u->ddir]++;
+ }
+
+ if (td_write(td) && io_u->ddir == DDIR_WRITE &&
+ td->o.do_verify &&
+ td->o.verify != VERIFY_NONE &&
+ !td->o.experimental_verify)
+ log_io_piece(td, io_u);
+
+ ret = io_u_sync_complete(td, io_u);
+ (void) ret;
+ }
+
+ return td->bytes_done[DDIR_WRITE] + td->bytes_done[DDIR_TRIM];
+}
+
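+/*
+ * Workqueue handler that submits a single io_u from a worker thread:
+ * retry while the engine reports FIO_Q_BUSY, reaping completions in
+ * between, and keep the worker's cur_depth accounting in step with
+ * completed, queued and busy results.
+ */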
+static void io_workqueue_fn(struct submit_worker *sw,
+ struct workqueue_work *work)
+{
+ struct io_u *io_u = container_of(work, struct io_u, work);
+ const enum fio_ddir ddir = io_u->ddir;
+ struct thread_data *td = sw->private;
+ int ret;
+
+ dprint(FD_RATE, "io_u %p queued by %u\n", io_u, gettid());
+
+ io_u_set(io_u, IO_U_F_NO_FILE_PUT);
+
+ td->cur_depth++;
+
+ do {
+ ret = td_io_queue(td, io_u);
+ if (ret != FIO_Q_BUSY)
+ break;
+ ret = io_u_queued_complete(td, 1);
+ if (ret > 0)
+ td->cur_depth -= ret;
+ io_u_clear(io_u, IO_U_F_FLIGHT);
+ } while (1);
+
+ dprint(FD_RATE, "io_u %p ret %d by %u\n", io_u, ret, gettid());
+
+ io_queue_event(td, io_u, &ret, ddir, NULL, 0, NULL);
+
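+ /*
+ * Completed IO frees its depth slot right away. Queued IO may need
+ * a reap (wait for at least one event at iodepth=1), and a busy
+ * engine forces us to drain everything currently queued.
+ */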
+ if (ret == FIO_Q_COMPLETED)
+ td->cur_depth--;
+ else if (ret == FIO_Q_QUEUED) {
+ unsigned int min_evts;
+
+ if (td->o.iodepth == 1)
+ min_evts = 1;
+ else
+ min_evts = 0;
+
+ ret = io_u_queued_complete(td, min_evts);
+ if (ret > 0)
+ td->cur_depth -= ret;
+ } else if (ret == FIO_Q_BUSY) {
+ ret = io_u_queued_complete(td, td->cur_depth);
+ if (ret > 0)
+ td->cur_depth -= ret;
+ }
+}
+
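+/*
+ * Called before a submit worker goes to sleep: report whether queued or
+ * in-flight IO remains that must be flushed first.
+ */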
+static bool io_workqueue_pre_sleep_flush_fn(struct submit_worker *sw)
+{
+ struct thread_data *td = sw->private;
+
+ if (td->io_u_queued || td->cur_depth || td->io_u_in_flight)
+ return true;
+
+ return false;