#ifdef WIN32
static void sig_break(int sig)
{
- struct thread_data *td;
int i;
sig_int(sig);
static void reap_threads(unsigned int *nr_running, uint64_t *t_rate,
uint64_t *m_rate)
{
- struct thread_data *td;
unsigned int cputhreads, realthreads, pending;
int i, status, ret;
{
const char *waitee = me->o.wait_for;
const char *self = me->o.name;
- struct thread_data *td;
int i;
if (!waitee)
*/
static void run_threads(struct sk_out *sk_out)
{
- struct thread_data *td;
unsigned int i, todo, nr_running, nr_started;
uint64_t m_rate, t_rate;
uint64_t spent;
do_usleep(100000);
for (i = 0; i < this_jobs; i++) {
- td = map[i];
+ struct thread_data *td = map[i];
if (!td)
continue;
if (td->runstate == TD_INITIALIZED) {
log_err("fio: %d job%s failed to start\n", left,
left > 1 ? "s" : "");
for (i = 0; i < this_jobs; i++) {
- td = map[i];
+ struct thread_data *td = map[i];
if (!td)
continue;
kill(td->pid, SIGTERM);
int fio_backend(struct sk_out *sk_out)
{
- struct thread_data *td;
int i;
if (exec_profile) {
int init_global_dedupe_working_set_seeds(void)
{
int i;
- struct thread_data *td;
for_each_td(td, i) {
if (!td->o.dedupe_global)
*/
static int total_threaded_subjobs(bool hipri)
{
- struct thread_data *td;
unsigned int i;
int count = 0;
*/
bool calc_thread_status(struct jobs_eta *je, int force)
{
- struct thread_data *td;
int i, unified_rw_rep;
uint64_t rate_time, disp_time, bw_avg_time, *eta_secs;
unsigned long long io_bytes[DDIR_RWDIR_CNT] = {};
static unsigned long long disp_io_bytes[DDIR_RWDIR_CNT];
static unsigned long long disp_io_iops[DDIR_RWDIR_CNT];
static struct timespec rate_prev_time, disp_prev_time;
+ bool any_td_in_ramp = false;
if (!force) {
if (!(output_format & FIO_OUTPUT_NORMAL) &&
bw_avg_time = ULONG_MAX;
unified_rw_rep = 0;
for_each_td(td, i) {
+ any_td_in_ramp |= in_ramp_time(td);
unified_rw_rep += td->o.unified_rw_rep;
if (is_power_of_2(td->o.kb_base))
je->is_pow2 = 1;
if (exitall_on_terminate) {
je->eta_sec = INT_MAX;
for_each_td(td, i) {
if (eta_secs[i] < je->eta_sec)
je->eta_sec = eta_secs[i];
}
} else {
unsigned long eta_stone = 0;
fio_gettime(&now, NULL);
rate_time = mtime_since(&rate_prev_time, &now);
- if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
+ if (write_bw_log && rate_time > bw_avg_time && !any_td_in_ramp) {
calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
je->rate);
memcpy(&rate_prev_time, &now, sizeof(now));
/*
* Iterates all threads/processes within all the defined jobs
*/
-#define for_each_td(td, i) \
-	for ((i) = 0, (td) = &segments[0].threads[0]; (i) < (int) thread_number; (i)++, (td) = tnumber_to_td((i)))
+/*
+ * Declare the iteration variable inside the for-statement so callers no
+ * longer need a local "struct thread_data *".  The index reset is folded
+ * into the init clause so the macro stays a single statement and remains
+ * safe inside un-braced if/else bodies.
+ */
+#define for_each_td(__td, __i) \
+	for (struct thread_data *__td = (((__i) = 0), &segments[0].threads[0]); \
+	     (__i) < (int) thread_number; \
+	     (__i)++, (__td) = tnumber_to_td((__i)))
+
#define for_each_file(td, f, i) \
if ((td)->files_index) \
for ((i) = 0, (f) = (td)->files[0]; \
static int check_waitees(char *waitee)
{
- struct thread_data *td;
int i, ret = 0;
for_each_td(td, i) {
static int verify_per_group_options(struct thread_data *td, const char *jobname)
{
- struct thread_data *td2;
int i;
for_each_td(td2, i) {
void fio_writeout_logs(bool unit_logs)
{
- struct thread_data *td;
int i;
for_each_td(td, i)
void fio_terminate_threads(unsigned int group_id, unsigned int terminate)
{
- struct thread_data *td;
pid_t pid = getpid();
int i;
int fio_running_or_pending_io_threads(void)
{
- struct thread_data *td;
int i;
int nr_io_threads = 0;
static void check_overlap(struct io_u *io_u)
{
int i, res;
- struct thread_data *td;
/*
* Allow only one thread to check for overlap at a time to prevent two
static void init_per_prio_stats(struct thread_stat *threadstats, int nr_ts)
{
- struct thread_data *td;
struct thread_stat *ts;
int i, j, last_ts, idx;
enum fio_ddir ddir;
void __show_run_stats(void)
{
struct group_run_stats *runstats, *rs;
- struct thread_data *td;
struct thread_stat *threadstats, *ts;
int i, j, k, nr_ts, last_ts, idx;
bool kb_base_warned = false;
int __show_running_run_stats(void)
{
- struct thread_data *td;
unsigned long long *rt;
struct timespec ts;
int i;
*/
int calc_log_samples(void)
{
- struct thread_data *td;
unsigned int next = ~0U, tmp = 0, next_mod = 0, log_avg_msec_min = -1U;
struct timespec now;
int i;
void steadystate_setup(void)
{
- struct thread_data *td, *prev_td;
+ struct thread_data *prev_td;
int i, prev_groupid;
if (!steadystate_enabled)
{
int i, j, ddir, prev_groupid, group_ramp_time_over = 0;
unsigned long rate_time;
- struct thread_data *td, *td2;
struct timespec now;
uint64_t group_bw = 0, group_iops = 0;
uint64_t td_iops, td_bytes;
{
struct steadystate_data *ss = &td->ss;
struct thread_options *o = &td->o;
- struct thread_data *td2;
int j;
memset(ss, 0, sizeof(*ss));
struct all_io_list *get_all_io_list(int save_mask, size_t *sz)
{
struct all_io_list *rep;
- struct thread_data *td;
size_t depth;
void *next;
int i, nr;
/* Verify whether direct I/O is used for all host-managed zoned block drives. */
static bool zbd_using_direct_io(void)
{
- struct thread_data *td;
struct fio_file *f;
int i, j;
*/
static bool zbd_verify_sizes(void)
{
- struct thread_data *td;
struct fio_file *f;
int i, j;
static bool zbd_verify_bs(void)
{
- struct thread_data *td;
struct fio_file *f;
int i, j;
*/
static int zbd_init_zone_info(struct thread_data *td, struct fio_file *file)
{
- struct thread_data *td2;
struct fio_file *f2;
int i, j, ret;
static bool any_io_in_flight(void)
{
- struct thread_data *td;
int i;
for_each_td(td, i) {