#ifdef WIN32
static void sig_break(int sig)
{
- struct thread_data *td;
- int i;
-
sig_int(sig);
/**
* Windows terminates all job processes on SIGBREAK after the handler
 * returns, so give them time to wrap up and report their stats
*/
- for_each_td(td, i) {
+ for_each_td(td) {
while (td->runstate < TD_EXITED)
sleep(1);
- }
+ } end_for_each();
}
#endif
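For orientation, sig_break only fires once it is registered for SIGBREAK; a minimal sketch of that wiring, assuming fio's set_sig_handlers() uses a sigaction-style wrapper on Windows (the exact flags and error handling here are hypothetical):

#ifdef WIN32
	struct sigaction act = {
		.sa_handler = sig_break,	/* the handler defined above */
		.sa_flags = SA_RESTART,
	};
	/* hypothetical registration; the real set_sig_handlers() may differ */
	sigaction(SIGBREAK, &act, NULL);
#endif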
static void reap_threads(unsigned int *nr_running, uint64_t *t_rate,
uint64_t *m_rate)
{
- struct thread_data *td;
unsigned int cputhreads, realthreads, pending;
- int i, status, ret;
+ int status, ret;
/*
* reap exited threads (TD_EXITED -> TD_REAPED)
*/
realthreads = pending = cputhreads = 0;
- for_each_td(td, i) {
+ for_each_td(td) {
int flags = 0;
if (!strcmp(td->o.ioengine, "cpuio"))
done_secs += mtime_since_now(&td->epoch) / 1000;
profile_td_exit(td);
flow_exit_job(td);
- }
+ } end_for_each();
if (*nr_running == cputhreads && !pending && realthreads)
fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
{
const char *waitee = me->o.wait_for;
const char *self = me->o.name;
- struct thread_data *td;
- int i;
if (!waitee)
return false;
- for_each_td(td, i) {
+ for_each_td(td) {
if (!strcmp(td->o.name, self) || strcmp(td->o.name, waitee))
continue;
runstate_to_name(td->runstate));
return true;
}
- }
+ } end_for_each();
dprint(FD_PROCESS, "%s: %s completed, can run\n", self, waitee);
return false;
set_sig_handlers();
nr_thread = nr_process = 0;
- for_each_td(td, i) {
+ for_each_td(td) {
if (check_mount_writes(td))
return;
if (td->o.use_thread)
nr_thread++;
else
nr_process++;
- }
+ } end_for_each();
if (output_format & FIO_OUTPUT_NORMAL) {
struct buf_output out;
nr_started = 0;
m_rate = t_rate = 0;
- for_each_td(td, i) {
+ for_each_td(td) {
print_status_init(td->thread_number - 1);
if (!td->o.create_serialize)
td_io_close_file(td, f);
}
}
- }
+ } end_for_each();
/* start idle threads before io threads start to run */
fio_idle_prof_start();
/*
* create threads (TD_NOT_CREATED -> TD_CREATED)
*/
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->runstate != TD_NOT_CREATED)
continue;
ret = (int)(uintptr_t)thread_main(fd);
_exit(ret);
- } else if (i == fio_debug_jobno)
+ } else if (__td_index == fio_debug_jobno)
*fio_debug_jobp = pid;
free(eo);
free(fd);
break;
}
dprint(FD_MUTEX, "done waiting on startup_sem\n");
- }
+ } end_for_each();
/*
* Wait for the started threads to transition to
/*
* start created threads (TD_INITIALIZED -> TD_RUNNING).
*/
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->runstate != TD_INITIALIZED)
continue;
t_rate += ddir_rw_sum(td->o.rate);
todo--;
fio_sem_up(td->sem);
- }
+ } end_for_each();
reap_threads(&nr_running, &t_rate, &m_rate);
int fio_backend(struct sk_out *sk_out)
{
- struct thread_data *td;
int i;
-
if (exec_profile) {
if (load_profile(exec_profile))
return 1;
}
}
- for_each_td(td, i) {
+ for_each_td(td) {
struct thread_stat *ts = &td->ts;
free_clat_prio_stats(ts);
}
fio_sem_remove(td->sem);
td->sem = NULL;
- }
+ } end_for_each();
free_disk_util();
if (cgroup_list) {
*/
int init_global_dedupe_working_set_seeds(void)
{
- int i;
- struct thread_data *td;
-
- for_each_td(td, i) {
+ for_each_td(td) {
if (!td->o.dedupe_global)
continue;
if (init_dedupe_working_set_seeds(td, 1))
return 1;
- }
+ } end_for_each();
return 0;
}
*/
static int total_threaded_subjobs(bool hipri)
{
- struct thread_data *td;
- unsigned int i;
int count = 0;
- for_each_td(td, i) {
+ for_each_td(td) {
const struct fio_blkio_options *options = td->eo;
if (strcmp(td->o.ioengine, "libblkio") == 0 &&
td->o.use_thread && (bool)options->hipri == hipri)
++count;
- }
+ } end_for_each();
return count;
}
*/
bool calc_thread_status(struct jobs_eta *je, int force)
{
- struct thread_data *td;
- int i, unified_rw_rep;
+ int unified_rw_rep;
bool any_td_in_ramp;
uint64_t rate_time, disp_time, bw_avg_time, *eta_secs;
unsigned long long io_bytes[DDIR_RWDIR_CNT] = {};
bw_avg_time = ULONG_MAX;
unified_rw_rep = 0;
- for_each_td(td, i) {
+ for_each_td(td) {
unified_rw_rep += td->o.unified_rw_rep;
if (is_power_of_2(td->o.kb_base))
je->is_pow2 = 1;
je->nr_pending++;
if (je->elapsed_sec >= 3)
- eta_secs[i] = thread_eta(td);
+ eta_secs[__td_index] = thread_eta(td);
else
- eta_secs[i] = INT_MAX;
+ eta_secs[__td_index] = INT_MAX;
check_str_update(td);
}
}
}
- }
+ } end_for_each();
if (exitall_on_terminate) {
je->eta_sec = INT_MAX;
- for_each_td(td, i) {
- if (eta_secs[i] < je->eta_sec)
- je->eta_sec = eta_secs[i];
- }
+ for_each_td_index() {
+ if (eta_secs[__td_index] < je->eta_sec)
+ je->eta_sec = eta_secs[__td_index];
+ } end_for_each();
} else {
unsigned long eta_stone = 0;
je->eta_sec = 0;
- for_each_td(td, i) {
+ for_each_td(td) {
if ((td->runstate == TD_NOT_CREATED) && td->o.stonewall)
- eta_stone += eta_secs[i];
+ eta_stone += eta_secs[__td_index];
else {
- if (eta_secs[i] > je->eta_sec)
- je->eta_sec = eta_secs[i];
+ if (eta_secs[__td_index] > je->eta_sec)
+ je->eta_sec = eta_secs[__td_index];
}
- }
+ } end_for_each();
je->eta_sec += eta_stone;
}
rate_time = mtime_since(&rate_prev_time, &now);
any_td_in_ramp = false;
- for_each_td(td, i) {
+ for_each_td(td) {
any_td_in_ramp |= in_ramp_time(td);
- }
+ } end_for_each();
if (write_bw_log && rate_time > bw_avg_time && !any_td_in_ramp) {
calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
je->rate);
/*
* Iterates all threads/processes within all the defined jobs
+ * Usage:
+ *  for_each_td(var_name_for_td) {
+ *    << body of your loop >>
+ *    Note: the internally-scoped loop index is available as __td_index
+ *  } end_for_each();
+ * The braces scope both the td pointer and __td_index, so any use after
+ * end_for_each() fails to compile.
*/
-#define for_each_td(td, i) \
- for ((i) = 0, (td) = &segments[0].threads[0]; (i) < (int) thread_number; (i)++, (td) = tnumber_to_td((i)))
+#define for_each_td(td) \
+{ \
+ int __td_index; \
+ struct thread_data *(td); \
+ for (__td_index = 0, (td) = &segments[0].threads[0];\
+ __td_index < (int) thread_number; __td_index++, (td) = tnumber_to_td(__td_index))
+#define for_each_td_index() \
+{ \
+ int __td_index; \
+ for (__td_index = 0; __td_index < (int) thread_number; __td_index++)
+#define end_for_each() }
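To make the new contract concrete, a small illustrative sketch (the helper name nr_jobs_running is hypothetical; TD_RUNNING, td->runstate, eta_secs, and the macros all appear in the diff above):

static int nr_jobs_running(void)
{
	int count = 0;

	for_each_td(td) {
		if (td->runstate == TD_RUNNING)
			count++;
	} end_for_each();

	/* td and __td_index are out of scope here; stale reuse fails to compile */

	for_each_td_index() {
		/* index-only walk, e.g. eta_secs[__td_index] = INT_MAX; */
	} end_for_each();

	return count;
}

The scoped form is the point of the refactor: with the old two-argument macro, a td pointer left dangling after the loop still compiled silently, whereas the new block turns that reuse into a compile error.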
+
#define for_each_file(td, f, i) \
if ((td)->files_index) \
for ((i) = 0, (f) = (td)->files[0]; \
static int check_waitees(char *waitee)
{
- struct thread_data *td;
- int i, ret = 0;
+ int ret = 0;
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->subjob_number)
continue;
ret += !strcmp(td->o.name, waitee);
- }
+ } end_for_each();
return ret;
}
static int verify_per_group_options(struct thread_data *td, const char *jobname)
{
- struct thread_data *td2;
- int i;
-
- for_each_td(td2, i) {
+ for_each_td(td2) {
if (td->groupid != td2->groupid)
continue;
jobname);
return 1;
}
- }
+ } end_for_each();
return 0;
}
void fio_writeout_logs(bool unit_logs)
{
- struct thread_data *td;
- int i;
-
- for_each_td(td, i)
+ for_each_td(td) {
td_writeout_logs(td, unit_logs);
+ } end_for_each();
}
void fio_terminate_threads(unsigned int group_id, unsigned int terminate)
{
- struct thread_data *td;
pid_t pid = getpid();
- int i;
dprint(FD_PROCESS, "terminate group_id=%d\n", group_id);
- for_each_td(td, i) {
+ for_each_td(td) {
if ((terminate == TERMINATE_GROUP && group_id == TERMINATE_ALL) ||
(terminate == TERMINATE_GROUP && group_id == td->groupid) ||
(terminate == TERMINATE_STONEWALL && td->runstate >= TD_RUNNING) ||
ops->terminate(td);
}
}
- }
+ } end_for_each();
}
int fio_running_or_pending_io_threads(void)
{
- struct thread_data *td;
- int i;
int nr_io_threads = 0;
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->io_ops_init && td_ioengine_flagged(td, FIO_NOIO))
continue;
nr_io_threads++;
if (td->runstate < TD_EXITED)
return 1;
- }
+ } end_for_each();
if (!nr_io_threads)
return -1; /* we only had cpuio threads to begin with */
static void check_overlap(struct io_u *io_u)
{
- int i, res;
- struct thread_data *td;
+ int res;
/*
* Allow only one thread to check for overlap at a time to prevent two
assert(res == 0);
retry:
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->runstate <= TD_SETTING_UP ||
td->runstate >= TD_FINISHING ||
!td->o.serialize_overlap ||
res = pthread_mutex_lock(&overlap_check);
assert(res == 0);
goto retry;
- }
+ } end_for_each();
}
static int io_workqueue_fn(struct submit_worker *sw,
static void init_per_prio_stats(struct thread_stat *threadstats, int nr_ts)
{
- struct thread_data *td;
struct thread_stat *ts;
int i, j, last_ts, idx;
enum fio_ddir ddir;
* store a 1 in ts->disable_prio_stat, and then do an additional
* loop at the end where we invert the ts->disable_prio_stat values.
*/
- for_each_td(td, i) {
+ for_each_td(td) {
if (!td->o.stats)
continue;
if (idx &&
}
idx++;
- }
+ } end_for_each();
/* Loop through all dst threadstats and fixup the values. */
for (i = 0; i < nr_ts; i++) {
void __show_run_stats(void)
{
struct group_run_stats *runstats, *rs;
- struct thread_data *td;
struct thread_stat *threadstats, *ts;
int i, j, k, nr_ts, last_ts, idx;
bool kb_base_warned = false;
*/
nr_ts = 0;
last_ts = -1;
- for_each_td(td, i) {
+ for_each_td(td) {
if (!td->o.group_reporting) {
nr_ts++;
continue;
last_ts = td->groupid;
nr_ts++;
- }
+ } end_for_each();
threadstats = malloc(nr_ts * sizeof(struct thread_stat));
opt_lists = malloc(nr_ts * sizeof(struct flist_head *));
j = 0;
last_ts = -1;
idx = 0;
- for_each_td(td, i) {
+ for_each_td(td) {
if (!td->o.stats)
continue;
if (idx && (!td->o.group_reporting ||
}
else
ts->ss_dur = ts->ss_state = 0;
- }
+ } end_for_each();
for (i = 0; i < nr_ts; i++) {
unsigned long long bw;
int __show_running_run_stats(void)
{
- struct thread_data *td;
unsigned long long *rt;
struct timespec ts;
- int i;
fio_sem_down(stat_sem);
rt = malloc(thread_number * sizeof(unsigned long long));
fio_gettime(&ts, NULL);
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->runstate >= TD_EXITED)
continue;
}
td->ts.total_run_time = mtime_since(&td->epoch, &ts);
- rt[i] = mtime_since(&td->start, &ts);
+ rt[__td_index] = mtime_since(&td->start, &ts);
if (td_read(td) && td->ts.io_bytes[DDIR_READ])
- td->ts.runtime[DDIR_READ] += rt[i];
+ td->ts.runtime[DDIR_READ] += rt[__td_index];
if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
- td->ts.runtime[DDIR_WRITE] += rt[i];
+ td->ts.runtime[DDIR_WRITE] += rt[__td_index];
if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
- td->ts.runtime[DDIR_TRIM] += rt[i];
- }
+ td->ts.runtime[DDIR_TRIM] += rt[__td_index];
+ } end_for_each();
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->runstate >= TD_EXITED)
continue;
if (td->rusage_sem) {
fio_sem_down(td->rusage_sem);
}
td->update_rusage = 0;
- }
+ } end_for_each();
__show_run_stats();
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->runstate >= TD_EXITED)
continue;
if (td_read(td) && td->ts.io_bytes[DDIR_READ])
- td->ts.runtime[DDIR_READ] -= rt[i];
+ td->ts.runtime[DDIR_READ] -= rt[__td_index];
if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
- td->ts.runtime[DDIR_WRITE] -= rt[i];
+ td->ts.runtime[DDIR_WRITE] -= rt[__td_index];
if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
- td->ts.runtime[DDIR_TRIM] -= rt[i];
- }
+ td->ts.runtime[DDIR_TRIM] -= rt[__td_index];
+ } end_for_each();
free(rt);
fio_sem_up(stat_sem);
*/
int calc_log_samples(void)
{
- struct thread_data *td;
unsigned int next = ~0U, tmp = 0, next_mod = 0, log_avg_msec_min = -1U;
struct timespec now;
- int i;
long elapsed_time = 0;
fio_gettime(&now, NULL);
- for_each_td(td, i) {
+ for_each_td(td) {
elapsed_time = mtime_since_now(&td->epoch);
if (!td->o.stats)
if (tmp < next)
next = tmp;
- }
+ } end_for_each();
/* if log_avg_msec_min has not been changed, set it to 0 */
if (log_avg_msec_min == -1U)
void steadystate_setup(void)
{
- struct thread_data *td, *prev_td;
- int i, prev_groupid;
+ struct thread_data *prev_td;
+ int prev_groupid;
if (!steadystate_enabled)
return;
*/
prev_groupid = -1;
prev_td = NULL;
- for_each_td(td, i) {
+ for_each_td(td) {
if (!td->ss.dur)
continue;
prev_groupid = td->groupid;
}
prev_td = td;
- }
+ } end_for_each();
if (prev_td && prev_td->o.group_reporting)
steadystate_alloc(prev_td);
int steadystate_check(void)
{
- int i, j, ddir, prev_groupid, group_ramp_time_over = 0;
+ int ddir, prev_groupid, group_ramp_time_over = 0;
unsigned long rate_time;
- struct thread_data *td, *td2;
struct timespec now;
uint64_t group_bw = 0, group_iops = 0;
uint64_t td_iops, td_bytes;
bool ret;
prev_groupid = -1;
- for_each_td(td, i) {
+ for_each_td(td) {
const bool needs_lock = td_async_processing(td);
struct steadystate_data *ss = &td->ss;
dprint(FD_STEADYSTATE, "steadystate_check() thread: %d, "
"groupid: %u, rate_msec: %ld, "
"iops: %llu, bw: %llu, head: %d, tail: %d\n",
- i, td->groupid, rate_time,
+ __td_index, td->groupid, rate_time,
(unsigned long long) group_iops,
(unsigned long long) group_bw,
ss->head, ss->tail);
if (ret) {
if (td->o.group_reporting) {
- for_each_td(td2, j) {
+ for_each_td(td2) {
if (td2->groupid == td->groupid) {
td2->ss.state |= FIO_SS_ATTAINED;
fio_mark_td_terminate(td2);
}
- }
+ } end_for_each();
} else {
ss->state |= FIO_SS_ATTAINED;
fio_mark_td_terminate(td);
}
}
- }
+ } end_for_each();
return 0;
}
{
struct steadystate_data *ss = &td->ss;
struct thread_options *o = &td->o;
- struct thread_data *td2;
- int j;
memset(ss, 0, sizeof(*ss));
}
/* make sure that ss options are consistent within reporting group */
- for_each_td(td2, j) {
+ for_each_td(td2) {
if (td2->groupid == td->groupid) {
struct steadystate_data *ss2 = &td2->ss;
return 1;
}
}
- }
+ } end_for_each();
return 0;
}
struct all_io_list *get_all_io_list(int save_mask, size_t *sz)
{
struct all_io_list *rep;
- struct thread_data *td;
size_t depth;
void *next;
- int i, nr;
+ int nr;
compiletime_assert(sizeof(struct all_io_list) == 8, "all_io_list");
*/
depth = 0;
nr = 0;
- for_each_td(td, i) {
- if (save_mask != IO_LIST_ALL && (i + 1) != save_mask)
+ for_each_td(td) {
+ if (save_mask != IO_LIST_ALL && (__td_index + 1) != save_mask)
continue;
td->stop_io = 1;
td->flags |= TD_F_VSTATE_SAVED;
depth += (td->o.iodepth * td->o.nr_files);
nr++;
- }
+ } end_for_each();
if (!nr)
return NULL;
rep->threads = cpu_to_le64((uint64_t) nr);
next = &rep->state[0];
- for_each_td(td, i) {
+ for_each_td(td) {
struct thread_io_list *s = next;
unsigned int comps, index = 0;
- if (save_mask != IO_LIST_ALL && (i + 1) != save_mask)
+ if (save_mask != IO_LIST_ALL && (__td_index + 1) != save_mask)
continue;
comps = fill_file_completions(td, s, &index);
s->depth = cpu_to_le64((uint64_t) td->o.iodepth);
s->nofiles = cpu_to_le64((uint64_t) td->o.nr_files);
s->numberio = cpu_to_le64((uint64_t) td->io_issues[DDIR_WRITE]);
- s->index = cpu_to_le64((uint64_t) i);
+ s->index = cpu_to_le64((uint64_t) __td_index);
if (td->random_state.use64) {
s->rand.state64.s[0] = cpu_to_le64(td->random_state.state64.s1);
s->rand.state64.s[1] = cpu_to_le64(td->random_state.state64.s2);
}
snprintf((char *) s->name, sizeof(s->name), "%s", td->o.name);
next = io_list_next(s);
- }
+ } end_for_each();
return rep;
}
/* Verify whether direct I/O is used for all host-managed zoned block drives. */
static bool zbd_using_direct_io(void)
{
- struct thread_data *td;
struct fio_file *f;
- int i, j;
+ int j;
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->o.odirect || !(td->o.td_ddir & TD_DDIR_WRITE))
continue;
for_each_file(td, f, j) {
f->zbd_info->model == ZBD_HOST_MANAGED)
return false;
}
- }
+ } end_for_each();
return true;
}
*/
static bool zbd_verify_sizes(void)
{
- struct thread_data *td;
struct fio_file *f;
- int i, j;
+ int j;
- for_each_td(td, i) {
+ for_each_td(td) {
for_each_file(td, f, j) {
if (!zbd_zone_align_file_sizes(td, f))
return false;
}
- }
+ } end_for_each();
return true;
}
static bool zbd_verify_bs(void)
{
- struct thread_data *td;
struct fio_file *f;
- int i, j;
+ int j;
- for_each_td(td, i) {
+ for_each_td(td) {
if (td_trim(td) &&
(td->o.min_bs[DDIR_TRIM] != td->o.max_bs[DDIR_TRIM] ||
td->o.bssplit_nr[DDIR_TRIM])) {
return false;
}
}
- }
+ } end_for_each();
return true;
}
*/
static int zbd_init_zone_info(struct thread_data *td, struct fio_file *file)
{
- struct thread_data *td2;
struct fio_file *f2;
- int i, j, ret;
+ int j, ret;
- for_each_td(td2, i) {
+ for_each_td(td2) {
for_each_file(td2, f2, j) {
if (td2 == td && f2 == file)
continue;
file->zbd_info->refcount++;
return 0;
}
- }
+ } end_for_each();
ret = zbd_create_zone_info(td, file);
if (ret < 0)
static bool any_io_in_flight(void)
{
- struct thread_data *td;
- int i;
-
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->io_u_in_flight)
return true;
- }
+ } end_for_each();
return false;
}