static void terminate_threads(int group_id)
{
+ struct thread_data *td;
int i;
- for (i = 0; i < thread_number; i++) {
- struct thread_data *td = &threads[i];
-
+ for_each_td(td, i) {
if (group_id == TERMINATE_ALL || groupid == td->groupid) {
td->terminate = 1;
td->start_delay = 0;
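For reference, for_each_td() is presumably a thin wrapper over the same loop the old code spelled out by hand, walking the global threads[] array up to thread_number. A minimal sketch of what it could expand to (macro body assumed, not copied from the fio headers):

/* Sketch only: visit every job, assuming the existing globals
 * 'threads' (array of struct thread_data) and 'thread_number'. */
#define for_each_td(td, i)	\
	for ((i) = 0, (td) = &threads[0]; (i) < (int) thread_number; (i)++, (td)++)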
* Check if it's time to seed a new data direction.
*/
if (elapsed >= td->rwmixcycle) {
- int v;
+ unsigned int v;
long r;
r = os_random_long(&td->rwmix_state);
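Widening v to unsigned int fits a scheme where the raw random value is scaled into a 1..100 percentage and compared against td->rwmixread to choose the next data direction; a sketch of that scheme (field and constant names are assumptions):

/* Sketch: map r into 1..100 and reseed the data direction.
 * RAND_MAX as the range of os_random_long() is an assumption. */
unsigned int v = 1 + (unsigned int) (100.0 * (r / (RAND_MAX + 1.0)));

if (v <= td->rwmixread)
	td->rwmix_ddir = DDIR_READ;
else
	td->rwmix_ddir = DDIR_WRITE;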
static struct fio_file *get_next_file(struct thread_data *td)
{
- int old_next_file = td->next_file;
+ unsigned int old_next_file = td->next_file;
struct fio_file *f;
do {
}
}
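Making old_next_file unsigned keeps it the same type as td->next_file, so the wrap-around comparison in the round-robin scan is done with one signedness. A sketch of the kind of loop that comparison terminates (the files array, nr_files count and fd open-marker are assumptions):

/* Sketch: round-robin over the job's files, stopping either at the
 * first open file or once the index wraps back to old_next_file. */
do {
	f = &td->files[td->next_file];

	td->next_file++;
	if (td->next_file >= td->nr_files)
		td->next_file = 0;

	if (f->fd != -1)
		break;

	f = NULL;
} while (td->next_file != old_next_file);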
-static int do_io_u_verify(struct thread_data *td, struct io_u **io_u)
-{
- struct io_u *v_io_u = *io_u;
- int ret = 0;
-
- if (v_io_u) {
- ret = verify_io_u(v_io_u);
- put_io_u(td, v_io_u);
- *io_u = NULL;
- }
-
- return ret;
-}
-
/*
* The main verify engine. Runs over the writes we previously submitted,
* reads the blocks back in, and checks the crc/md5 of the data.
*/
-static void do_verify(struct thread_data *td)
+void do_verify(struct thread_data *td)
{
struct timeval t;
struct io_u *io_u, *v_io_u = NULL;
struct io_completion_data icd;
struct fio_file *f;
- int ret;
+ int ret, i;
+
+ /*
+ * sync io first and invalidate cache, to make sure we really
+ * read from disk.
+ */
+ for_each_file(td, f, i) {
+ td_io_sync(td, f);
+ file_invalidate_cache(td, f);
+ }
td_set_runstate(td, TD_VERIFYING);
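Forcing a sync plus cache invalidation before entering TD_VERIFYING is what makes the subsequent verify reads actually touch the device instead of the page cache. For a regular file, file_invalidate_cache() presumably amounts to an fadvise-style hint along these lines (sketch, not the fio helper; a block device would need a different mechanism, e.g. a BLKFLSBUF ioctl):

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

/* Sketch: flush dirty pages, then ask the kernel to drop the cached
 * range so later reads are served from the device. */
static int invalidate_file_cache(int fd, off_t off, off_t len)
{
	if (fsync(fd) < 0)
		return -errno;

	return posix_fadvise(fd, off, len, POSIX_FADV_DONTNEED);
}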
struct timeval s, e;
unsigned long usec;
struct fio_file *f;
- int i;
+ int i, ret = 0;
td_set_runstate(td, TD_RUNNING);
while (td->this_io_bytes[td->ddir] < td->io_size) {
struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
struct timespec *timeout;
- int ret, min_evts = 0;
+ int min_evts = 0;
struct io_u *io_u;
if (td->terminate)
min_evts = 1;
}
+
ret = td_io_getevents(td, min_evts, td->cur_depth, timeout);
if (ret < 0) {
- td_verror(td, ret);
+ td_verror(td, -ret);
break;
} else if (!ret)
continue;
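The switch to td_verror(td, -ret) lines up with the io engines reporting failures as negative errno values from getevents: a negative return is an error, zero means nothing completed yet, and a positive value is the event count, so the caller negates before recording. A minimal sketch of that convention (names here are illustrative, not fio's):

#include <errno.h>

/* Sketch of the assumed return convention:
 *  < 0 -> negative errno (failure)
 *    0 -> no events completed
 *  > 0 -> number of completed events */
static int example_getevents(int completed, int failed)
{
	return failed ? -EIO : completed;
}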
td_io_sync(td, f);
}
- if (td->cur_depth)
- cleanup_pending_aio(td);
+ if (!ret) {
+ if (td->cur_depth)
+ cleanup_pending_aio(td);
- if (should_fsync(td) && td->end_fsync) {
- td_set_runstate(td, TD_FSYNCING);
- for_each_file(td, f, i)
- td_io_sync(td, f);
+ if (should_fsync(td) && td->end_fsync) {
+ td_set_runstate(td, TD_FSYNCING);
+ for_each_file(td, f, i)
+ td_io_sync(td, f);
+ }
}
}
*/
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
+ struct thread_data *td;
int i, cputhreads;
/*
* reap exited threads (TD_EXITED -> TD_REAPED)
*/
- for (i = 0, cputhreads = 0; i < thread_number; i++) {
- struct thread_data *td = &threads[i];
-
- if (td->io_ops->flags & FIO_CPUIO)
+ cputhreads = 0;
+ for_each_td(td, i) {
+ /*
+ * ->io_ops is NULL for a thread that has closed its
+ * io engine
+ */
+ if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
cputhreads++;
if (td->runstate != TD_EXITED)
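Since && binds looser than &, the new check parses as td->io_ops && (td->io_ops->flags & FIO_CPUIO); written with explicit parentheses it reads:

/* Equivalent, fully parenthesized form: skip the flag test when the
 * thread has already torn down its io engine. */
if (td->io_ops && (td->io_ops->flags & FIO_CPUIO))
	cputhreads++;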
nr_started = 0;
m_rate = t_rate = 0;
- for (i = 0; i < thread_number; i++) {
- td = &threads[i];
-
+ for_each_td(td, i) {
print_status_init(td->thread_number - 1);
init_disk_util(td);
/*
* create threads (TD_NOT_CREATED -> TD_CREATED)
*/
- for (i = 0; i < thread_number; i++) {
- td = &threads[i];
-
+ for_each_td(td, i) {
if (td->runstate != TD_NOT_CREATED)
continue;
/*
* start created threads (TD_INITIALIZED -> TD_RUNNING).
*/
- for (i = 0; i < thread_number; i++) {
- td = &threads[i];
-
+ for_each_td(td, i) {
if (td->runstate != TD_INITIALIZED)
continue;