*/
static int check_min_rate(struct thread_data *td, struct timeval *now)
{
+ unsigned long long bytes = 0;
unsigned long spent;
unsigned long rate;
- int ddir = td->ddir;
+
+ /*
+ * No minimum rate set, always ok
+ */
+ if (!td->ratemin)
+ return 0;
/*
 * allow a 2 second settle period in the beginning
 */
if (mtime_since(&td->start, now) < 2000)
return 0;
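+ /*
+  * Sum bytes over each active data direction, so mixed
+  * read/write jobs are rated on their combined throughput.
+  */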
+ if (td_read(td))
+ bytes += td->this_io_bytes[DDIR_READ];
+ if (td_write(td))
+ bytes += td->this_io_bytes[DDIR_WRITE];
+
/*
 * if rate blocks is set, sample is running
 */
if (td->rate_bytes) {
spent = mtime_since(&td->lastrate, now);

if (spent < td->ratecycle)
return 0;
- rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
- if (rate < td->ratemin) {
- fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+ if (bytes < td->rate_bytes) {
+ fprintf(f_out, "%s: min rate %u not met\n", td->name, td->ratemin);
return 1;
+ } else {
+ rate = (bytes - td->rate_bytes) / spent;
+ if (rate < td->ratemin) {
+ fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+ return 1;
+ }
}
}
- td->rate_bytes = td->this_io_bytes[ddir];
+ td->rate_bytes = bytes;
memcpy(&td->lastrate, now, sizeof(*now));
return 0;
}
/*
* get immediately available events, if any
*/
- r = io_u_queued_complete(td, 0, NULL);
+ r = io_u_queued_complete(td, 0);
if (r < 0)
return;
}
if (td->cur_depth)
- r = io_u_queued_complete(td, td->cur_depth, NULL);
+ r = io_u_queued_complete(td, td->cur_depth);
}
/*
requeue:
ret = td_io_queue(td, io_u);
if (ret < 0) {
- td_verror(td, io_u->error);
+ td_verror(td, io_u->error, "td_io_queue");
put_io_u(td, io_u);
return 1;
} else if (ret == FIO_Q_QUEUED) {
- if (io_u_queued_complete(td, 1, NULL) < 0)
+ if (io_u_queued_complete(td, 1) < 0)
return 1;
} else if (ret == FIO_Q_COMPLETED) {
if (io_u->error) {
- td_verror(td, io_u->error);
+ td_verror(td, io_u->error, "td_io_queue");
return 1;
}
- if (io_u_sync_complete(td, io_u, NULL) < 0)
+ if (io_u_sync_complete(td, io_u) < 0)
return 1;
} else if (ret == FIO_Q_BUSY) {
if (td_io_commit(td))
put_io_u(td, io_u);
break;
}
+
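+ /*
+  * Verification now runs from the io_u end_io callback rather
+  * than being passed to the complete helpers.
+  */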
+ io_u->end_io = verify_io_u;
requeue:
ret = td_io_queue(td, io_u);
io_u->xfer_buf += bytes;
goto requeue;
}
- ret = io_u_sync_complete(td, io_u, verify_io_u);
+ ret = io_u_sync_complete(td, io_u);
if (ret < 0)
break;
continue;
break;
default:
assert(ret < 0);
- td_verror(td, -ret);
+ td_verror(td, -ret, "td_io_queue");
break;
}
/*
 * Reap required number of io units, if any, and do the
* verification on them through the callback handler
*/
- if (io_u_queued_complete(td, min_events, verify_io_u) < 0)
+ if (io_u_queued_complete(td, min_events) < 0)
break;
}
- if (td->cur_depth)
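+ /*
+  * On a clean run, drain anything still queued; on error, cancel
+  * the pending aio instead.
+  */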
+ if (!td->error) {
+ min_events = td->cur_depth;
+
+ if (min_events)
+ ret = io_u_queued_complete(td, min_events);
+ } else
cleanup_pending_aio(td);
td_set_runstate(td, TD_RUNNING);
goto requeue;
}
fio_gettime(&comp_time, NULL);
- bytes_done = io_u_sync_complete(td, io_u, NULL);
+ bytes_done = io_u_sync_complete(td, io_u);
if (bytes_done < 0)
ret = bytes_done;
break;
}
fio_gettime(&comp_time, NULL);
- bytes_done = io_u_queued_complete(td, min_evts, NULL);
+ bytes_done = io_u_queued_complete(td, min_evts);
if (bytes_done < 0)
break;
}
*/
usec = utime_since(&s, &comp_time);
- rate_throttle(td, usec, bytes_done, td->ddir);
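+ /*
+  * Throttle based on combined bytes done rather than a single
+  * data direction.
+  */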
+ rate_throttle(td, usec, bytes_done);
if (check_min_rate(td, &comp_time)) {
if (exitall_on_terminate)
terminate_threads(td->groupid, 0);
- td_verror(td, ENODATA);
+ td_verror(td, ENODATA, "check_min_rate");
break;
}
if (!td->error) {
struct fio_file *f;
- if (td->cur_depth)
- cleanup_pending_aio(td);
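+ /*
+  * Reap any remaining queued io before the end-of-run fsync.
+  */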
+ i = td->cur_depth;
+ if (i)
+ ret = io_u_queued_complete(td, i);
if (should_fsync(td) && td->end_fsync) {
td_set_runstate(td, TD_FSYNCING);
for_each_file(td, f, i)
fio_io_sync(td, f);
}
- }
+ } else
+ cleanup_pending_aio(td);
}
static void cleanup_io_u(struct thread_data *td)
f = fopen(tmp, "r+");
if (!f) {
- td_verror(td, errno);
+ td_verror(td, errno, "fopen");
return 1;
}
*/
ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
if (ferror(f) || ret != 1) {
- td_verror(td, errno);
+ td_verror(td, errno, "fwrite");
fclose(f);
return 1;
}
*/
ret = fread(tmp, 1, sizeof(tmp), f);
if (ferror(f) || ret < 0) {
- td_verror(td, errno);
+ td_verror(td, errno, "fread");
fclose(f);
return 1;
}
sprintf(tmp2, "[%s]", td->ioscheduler);
if (!strstr(tmp, tmp2)) {
log_err("fio: io scheduler %s not found\n", td->ioscheduler);
- td_verror(td, EINVAL);
+ td_verror(td, EINVAL, "iosched_switch");
fclose(f);
return 1;
}
goto err;
if (fio_setaffinity(td) == -1) {
- td_verror(td, errno);
+ td_verror(td, errno, "cpu_set_affinity");
goto err;
}
if (td->ioprio) {
if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
- td_verror(td, errno);
+ td_verror(td, errno, "ioprio_set");
goto err;
}
}
if (nice(td->nice) == -1) {
- td_verror(td, errno);
+ td_verror(td, errno, "nice");
goto err;
}
else
do_io(td);
- runtime[td->ddir] += utime_since_now(&td->start);
- if (td_rw(td) && td->io_bytes[td->ddir ^ 1])
- runtime[td->ddir ^ 1] = runtime[td->ddir];
-
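+ /*
+  * Account runtime separately for each direction that actually
+  * transferred data.
+  */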
+ if (td_read(td) && td->io_bytes[DDIR_READ])
+ runtime[DDIR_READ] += utime_since_now(&td->start);
+ if (td_write(td) && td->io_bytes[DDIR_WRITE])
+ runtime[DDIR_WRITE] += utime_since_now(&td->start);
+
if (td->error || td->terminate)
break;
*/
pending = cputhreads = 0;
for_each_td(td, i) {
- int flags;
+ int flags = 0;
/*
* ->io_ops is NULL for a thread that has closed its
if (!td->pid || td->runstate == TD_REAPED)
continue;
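+ /*
+  * Threads are not waitpid()'d: reap an exited thread right
+  * here and pthread_join() it below at the reaped label.
+  */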
+ if (td->use_thread) {
+ if (td->runstate == TD_EXITED) {
+ td_set_runstate(td, TD_REAPED);
+ goto reaped;
+ }
+ continue;
+ }
flags = WNOHANG;
if (td->runstate == TD_EXITED)
if (WIFEXITED(status)) {
if (WEXITSTATUS(status) && !td->error)
td->error = WEXITSTATUS(status);
- if (td->use_thread) {
- long ret;
- if (pthread_join(td->thread, (void *) &ret))
- perror("pthread_join");
- }
td_set_runstate(td, TD_REAPED);
goto reaped;
}
*/
continue;
reaped:
+ if (td->use_thread) {
+ long ret;
+
+ if (pthread_join(td->thread, (void *) &ret))
+ perror("pthread_join");
+ }
+
(*nr_running)--;
(*m_rate) -= td->ratemin;
(*t_rate) -= td->rate;