*/
static int check_min_rate(struct thread_data *td, struct timeval *now)
{
+ unsigned long long bytes = 0;
unsigned long spent;
unsigned long rate;
- int ddir = td->ddir;
/*
* allow a 2 second settle period in the beginning
*/
if (mtime_since(&td->start, now) < 2000)
return 0;
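+ /*
+ * sum the bytes for each data direction this job actually runs, so
+ * a mixed read/write workload is rate checked on its combined
+ * throughput
+ */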
+ if (td_read(td))
+ bytes += td->this_io_bytes[DDIR_READ];
+ if (td_write(td))
+ bytes += td->this_io_bytes[DDIR_WRITE];
+
/*
* if rate blocks is set, sample is running
*/
if (td->rate_bytes) {
spent = mtime_since(&td->lastrate, now);
if (spent < td->ratecycle)
return 0;
- rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
- if (rate < td->ratemin) {
- fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+ if (bytes < td->rate_bytes) {
+ fprintf(f_out, "%s: min rate %u not met\n", td->name, td->ratemin);
return 1;
+ } else {
+ rate = (bytes - td->rate_bytes) / spent;
+ if (rate < td->ratemin || bytes < td->rate_bytes) {
+ fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+ return 1;
+ }
}
}
- td->rate_bytes = td->this_io_bytes[ddir];
+ td->rate_bytes = bytes;
memcpy(&td->lastrate, now, sizeof(*now));
return 0;
}
/*
* get immediately available events, if any
*/
- r = io_u_queued_complete(td, 0, NULL);
+ r = io_u_queued_complete(td, 0);
if (r < 0)
return;
list_for_each_safe(entry, n, &td->io_u_busylist) {
io_u = list_entry(entry, struct io_u, list);
- r = td->io_ops->cancel(td, io_u);
- if (!r)
+ /*
+ * if the io_u isn't in flight, then that generally
+ * means someone leaked an io_u. complain but fix
+ * it up, so we don't stall here.
+ */
+ if ((io_u->flags & IO_U_F_FLIGHT) == 0) {
+ log_err("fio: non-busy IO on busy list\n");
put_io_u(td, io_u);
+ } else {
+ r = td->io_ops->cancel(td, io_u);
+ if (!r)
+ put_io_u(td, io_u);
+ }
}
}
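+ /*
+ * reap anything still in flight after the cancel pass above
+ */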
if (td->cur_depth)
- r = io_u_queued_complete(td, td->cur_depth, NULL);
+ r = io_u_queued_complete(td, td->cur_depth);
}
/*
requeue:
ret = td_io_queue(td, io_u);
if (ret < 0) {
- td_verror(td, io_u->error);
+ td_verror(td, io_u->error, "td_io_queue");
put_io_u(td, io_u);
return 1;
} else if (ret == FIO_Q_QUEUED) {
- if (io_u_queued_complete(td, 1, NULL))
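+ /*
+ * io_u_queued_complete() returns bytes completed (or a negative
+ * error), so only < 0 is treated as a failure here
+ */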
+ if (io_u_queued_complete(td, 1) < 0)
return 1;
} else if (ret == FIO_Q_COMPLETED) {
if (io_u->error) {
- td_verror(td, io_u->error);
+ td_verror(td, io_u->error, "td_io_queue");
return 1;
}
- if (io_u_sync_complete(td, io_u, NULL) < 0)
+ if (io_u_sync_complete(td, io_u) < 0)
return 1;
} else if (ret == FIO_Q_BUSY) {
if (td_io_commit(td))
if (!io_u)
break;
- if (runtime_exceeded(td, &io_u->start_time))
+ if (runtime_exceeded(td, &io_u->start_time)) {
+ put_io_u(td, io_u);
break;
+ }
- if (get_next_verify(td, io_u))
+ if (get_next_verify(td, io_u)) {
+ put_io_u(td, io_u);
break;
+ }
- if (td_io_prep(td, io_u))
+ if (td_io_prep(td, io_u)) {
+ put_io_u(td, io_u);
break;
+ }
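+ /*
+ * verification runs through the io_u end_io hook at completion
+ * time, instead of being passed as a callback to the *_complete()
+ * helpers
+ */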
+ io_u->end_io = verify_io_u;
requeue:
ret = td_io_queue(td, io_u);
io_u->xfer_buf += bytes;
goto requeue;
}
- ret = io_u_sync_complete(td, io_u, verify_io_u);
- if (ret)
+ ret = io_u_sync_complete(td, io_u);
+ if (ret < 0)
break;
continue;
case FIO_Q_QUEUED:
break;
default:
assert(ret < 0);
- td_verror(td, -ret);
+ td_verror(td, -ret, "td_io_queue");
break;
}
* Reap required number of io units, if any, and do the
* verification on them through the callback handler
*/
- if (io_u_queued_complete(td, min_events, verify_io_u))
+ if (io_u_queued_complete(td, min_events) < 0)
break;
}
- if (td->cur_depth)
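+ /*
+ * on a clean run, wait for all pending io_us to complete; only fall
+ * back to cancelling them if the job already hit an error
+ */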
+ if (!td->error) {
+ min_events = td->cur_depth;
+
+ if (min_events)
+ ret = io_u_queued_complete(td, min_events);
+ } else
cleanup_pending_aio(td);
td_set_runstate(td, TD_RUNNING);
goto requeue;
}
fio_gettime(&comp_time, NULL);
- bytes_done = io_u_sync_complete(td, io_u, NULL);
+ bytes_done = io_u_sync_complete(td, io_u);
if (bytes_done < 0)
ret = bytes_done;
break;
}
fio_gettime(&comp_time, NULL);
- bytes_done = io_u_queued_complete(td, min_evts, NULL);
+ bytes_done = io_u_queued_complete(td, min_evts);
if (bytes_done < 0)
break;
}
*/
usec = utime_since(&s, &comp_time);
- rate_throttle(td, usec, bytes_done, td->ddir);
+ rate_throttle(td, usec, bytes_done);
if (check_min_rate(td, &comp_time)) {
if (exitall_on_terminate)
terminate_threads(td->groupid, 0);
- td_verror(td, ENODATA);
+ td_verror(td, ENODATA, "check_min_rate");
break;
}
if (!td->error) {
struct fio_file *f;
- if (td->cur_depth)
- cleanup_pending_aio(td);
+ i = td->cur_depth;
+ if (i)
+ ret = io_u_queued_complete(td, i);
if (should_fsync(td) && td->end_fsync) {
td_set_runstate(td, TD_FSYNCING);
for_each_file(td, f, i)
fio_io_sync(td, f);
}
- }
+ } else
+ cleanup_pending_aio(td);
}
static void cleanup_io_u(struct thread_data *td)
fill_rand_buf(io_u, max_bs);
io_u->index = i;
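+ /*
+ * freshly allocated io_us start life on the free list, flag them so
+ */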
+ io_u->flags = IO_U_F_FREE;
list_add(&io_u->list, &td->io_u_freelist);
}
+ io_u_init_timeout();
+
return 0;
}
f = fopen(tmp, "r+");
if (!f) {
- td_verror(td, errno);
+ td_verror(td, errno, "fopen");
return 1;
}
*/
ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
if (ferror(f) || ret != 1) {
- td_verror(td, errno);
+ td_verror(td, errno, "fwrite");
fclose(f);
return 1;
}
*/
ret = fread(tmp, 1, sizeof(tmp), f);
if (ferror(f) || ret < 0) {
- td_verror(td, errno);
+ td_verror(td, errno, "fread");
fclose(f);
return 1;
}
sprintf(tmp2, "[%s]", td->ioscheduler);
if (!strstr(tmp, tmp2)) {
log_err("fio: io scheduler %s not found\n", td->ioscheduler);
- td_verror(td, EINVAL);
+ td_verror(td, EINVAL, "iosched_switch");
fclose(f);
return 1;
}
goto err;
if (fio_setaffinity(td) == -1) {
- td_verror(td, errno);
+ td_verror(td, errno, "cpu_set_affinity");
goto err;
}
if (td->ioprio) {
if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
- td_verror(td, errno);
+ td_verror(td, errno, "ioprio_set");
goto err;
}
}
if (nice(td->nice) == -1) {
- td_verror(td, errno);
+ td_verror(td, errno, "nice");
goto err;
}
}
fio_gettime(&td->epoch, NULL);
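+ /*
+ * seed the io_u timeout tracking (td->timeout_end) from the same
+ * timestamp as the job epoch
+ */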
+ memcpy(&td->timeout_end, &td->epoch, sizeof(td->epoch));
getrusage(RUSAGE_SELF, &td->ts.ru_start);
runtime[0] = runtime[1] = 0;
else
do_io(td);
- runtime[td->ddir] += utime_since_now(&td->start);
- if (td_rw(td) && td->io_bytes[td->ddir ^ 1])
- runtime[td->ddir ^ 1] = runtime[td->ddir];
-
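+ /*
+ * account runtime separately per data direction that actually moved
+ * bytes, rather than keying off a single td->ddir
+ */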
+ if (td_read(td) && td->io_bytes[DDIR_READ])
+ runtime[DDIR_READ] += utime_since_now(&td->start);
+ if (td_write(td) && td->io_bytes[DDIR_WRITE])
+ runtime[DDIR_WRITE] += utime_since_now(&td->start);
+
if (td->error || td->terminate)
break;
*/
pending = cputhreads = 0;
for_each_td(td, i) {
+ int flags = 0;
+
/*
* ->io_ops is NULL for a thread that has closed its
* io engine
if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
cputhreads++;
- if (td->runstate < TD_EXITED) {
- /*
- * check if someone quit or got killed in an unusual way
- */
- ret = waitpid(td->pid, &status, WNOHANG);
- if (ret < 0)
- perror("waitpid");
- else if ((ret == td->pid) && WIFSIGNALED(status)) {
- int sig = WTERMSIG(status);
-
- log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
+ if (!td->pid || td->runstate == TD_REAPED)
+ continue;
+ if (td->use_thread) {
+ if (td->runstate == TD_EXITED) {
td_set_runstate(td, TD_REAPED);
goto reaped;
}
+ continue;
}
- if (td->runstate != TD_EXITED) {
- if (td->runstate < TD_RUNNING)
- pending++;
+ flags = WNOHANG;
+ if (td->runstate == TD_EXITED)
+ flags = 0;
- continue;
- }
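+ /*
+ * for forked jobs, poll with WNOHANG unless the job has already
+ * flagged itself exited, in which case it is safe to block
+ */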
+ /*
+ * check if someone quit or got killed in an unusual way
+ */
+ ret = waitpid(td->pid, &status, flags);
+ if (ret < 0) {
+ if (errno == ECHILD) {
+ log_err("fio: pid=%d disappeared %d\n", td->pid, td->runstate);
+ td_set_runstate(td, TD_REAPED);
+ goto reaped;
+ }
+ perror("waitpid");
+ } else if (ret == td->pid) {
+ if (WIFSIGNALED(status)) {
+ int sig = WTERMSIG(status);
- if (td->error)
- exit_value++;
+ log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
+ td_set_runstate(td, TD_REAPED);
+ goto reaped;
+ }
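+ /*
+ * a normal exit with a non-zero status is folded into td->error so
+ * the exit_value accounting below picks it up
+ */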
+ if (WIFEXITED(status)) {
+ if (WEXITSTATUS(status) && !td->error)
+ td->error = WEXITSTATUS(status);
- td_set_runstate(td, TD_REAPED);
+ td_set_runstate(td, TD_REAPED);
+ goto reaped;
+ }
+ }
+ /*
+ * thread is not dead, continue
+ */
+ continue;
+reaped:
if (td->use_thread) {
long ret;
if (pthread_join(td->thread, (void *) &ret))
- perror("thread_join");
- } else {
- int status;
-
- ret = waitpid(td->pid, &status, 0);
- if (ret < 0)
- perror("waitpid");
- else if (WIFEXITED(status) && WEXITSTATUS(status)) {
- if (!exit_value)
- exit_value++;
- }
+ perror("pthread_join");
}
-reaped:
(*nr_running)--;
(*m_rate) -= td->ratemin;
(*t_rate) -= td->rate;
+
+ if (td->error)
+ exit_value++;
}
if (*nr_running == cputhreads && !pending)
init_disk_util(td);
}
+ set_genesis_time();
+
while (todo) {
struct thread_data *map[MAX_JOBS];
struct timeval this_start;
setup_log(&agg_io_log[DDIR_WRITE]);
}
+ set_genesis_time();
+
disk_util_timer_arm();
run_threads();