X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=eta.c;h=f90d428197656f362fcdf199865a65f06dbebbec;hp=e2a5a29ab3dcdf223ec5071c5fe6c5e636aacef7;hb=e382e661f6a67a24d8042b9e4b8f812b7126bdc4;hpb=6eaf09d6e9ca1f8accb057cdb18620b7e53ae33f

diff --git a/eta.c b/eta.c
index e2a5a29a..f90d4281 100644
--- a/eta.c
+++ b/eta.c
@@ -78,6 +78,7 @@ static void check_str_update(struct thread_data *td)
 		c = 'C';
 		break;
 	case TD_INITIALIZED:
+	case TD_SETTING_UP:
 		c = 'I';
 		break;
 	case TD_NOT_CREATED:
@@ -138,25 +139,36 @@ static int thread_eta(struct thread_data *td)
 		bytes_total = td->fill_device_size;
 	}
 
+	if (td->o.zone_size && td->o.zone_skip && bytes_total) {
+		unsigned int nr_zones;
+		uint64_t zone_bytes;
+
+		zone_bytes = bytes_total + td->o.zone_size + td->o.zone_skip;
+		nr_zones = (zone_bytes - 1) / (td->o.zone_size + td->o.zone_skip);
+		bytes_total -= nr_zones * td->o.zone_skip;
+	}
+
 	/*
-	 * if writing, bytes_total will be twice the size. If mixing,
-	 * assume a 50/50 split and thus bytes_total will be 50% larger.
+	 * if writing and verifying afterwards, bytes_total will be twice the
+	 * size. In a mixed workload, verify phase will be the size of the
+	 * first stage writes.
 	 */
 	if (td->o.do_verify && td->o.verify && td_write(td)) {
-		if (td_rw(td))
-			bytes_total = bytes_total * 3 / 2;
-		else
+		if (td_rw(td)) {
+			unsigned int perc = 50;
+
+			if (td->o.rwmix[DDIR_WRITE])
+				perc = td->o.rwmix[DDIR_WRITE];
+
+			bytes_total += (bytes_total * perc) / 100;
+		} else
 			bytes_total <<= 1;
 	}
 
-	if (td->o.zone_size && td->o.zone_skip)
-		bytes_total /= (td->o.zone_skip / td->o.zone_size);
-
 	if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
 		double perc, perc_t;
 
-		bytes_done = td->io_bytes[DDIR_READ] + td->io_bytes[DDIR_WRITE] +
-				td->io_bytes[DDIR_TRIM];
+		bytes_done = ddir_rw_sum(td->io_bytes);
 		perc = (double) bytes_done / (double) bytes_total;
 		if (perc > 1.0)
 			perc = 1.0;
@@ -177,6 +189,7 @@ static int thread_eta(struct thread_data *td)
 		   || td->runstate == TD_RAMP
 		   || td->runstate == TD_PRE_READING) {
 		int t_eta = 0, r_eta = 0;
+		unsigned long long rate_bytes;
 
 		/*
 		 * We can only guess - assume it'll run the full timeout
@@ -195,11 +208,9 @@ static int thread_eta(struct thread_data *td)
 				t_eta -= ramp_left;
 			}
 		}
-		if (td->o.rate[DDIR_READ] || td->o.rate[DDIR_WRITE] ||
-		    td->o.rate[DDIR_TRIM]) {
-			r_eta = (bytes_total / 1024) /
-				(td->o.rate[DDIR_READ] + td->o.rate[DDIR_WRITE] +
-				 td->o.rate[DDIR_TRIM]);
+		rate_bytes = ddir_rw_sum(td->o.rate);
+		if (rate_bytes) {
+			r_eta = (bytes_total / 1024) / rate_bytes;
 			r_eta += td->o.start_delay;
 		}
 
@@ -221,7 +232,8 @@ static int thread_eta(struct thread_data *td)
 	return eta_sec;
 }
 
-static void calc_rate(unsigned long mtime, unsigned long long *io_bytes,
+static void calc_rate(int unified_rw_rep, unsigned long mtime,
+		      unsigned long long *io_bytes,
 		      unsigned long long *prev_io_bytes, unsigned int *rate)
 {
 	int i;
@@ -230,19 +242,32 @@ static void calc_rate(unsigned long mtime, unsigned long long *io_bytes,
 		unsigned long long diff;
 
 		diff = io_bytes[i] - prev_io_bytes[i];
-		rate[i] = ((1000 * diff) / mtime) / 1024;
+		if (unified_rw_rep) {
+			rate[i] = 0;
+			rate[0] += ((1000 * diff) / mtime) / 1024;
+		} else
+			rate[i] = ((1000 * diff) / mtime) / 1024;
 
 		prev_io_bytes[i] = io_bytes[i];
 	}
 }
 
-static void calc_iops(unsigned long mtime, unsigned long long *io_iops,
+static void calc_iops(int unified_rw_rep, unsigned long mtime,
+		      unsigned long long *io_iops,
 		      unsigned long long *prev_io_iops, unsigned int *iops)
 {
 	int i;
 
 	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
-		iops[i] = ((io_iops[i] - prev_io_iops[i]) * 1000) / mtime;
+		unsigned long long diff;
+
+		diff = io_iops[i] - prev_io_iops[i];
+		if (unified_rw_rep) {
+			iops[i] = 0;
+			iops[0] += (diff * 1000) / mtime;
+		} else
+			iops[i] = (diff * 1000) / mtime;
+
 		prev_io_iops[i] = io_iops[i];
 	}
 }
@@ -254,7 +279,7 @@ static void calc_iops(unsigned long mtime, unsigned long long *io_iops,
 int calc_thread_status(struct jobs_eta *je, int force)
 {
 	struct thread_data *td;
-	int i;
+	int i, unified_rw_rep;
 	unsigned long rate_time, disp_time, bw_avg_time, *eta_secs;
 	unsigned long long io_bytes[DDIR_RWDIR_CNT];
 	unsigned long long io_iops[DDIR_RWDIR_CNT];
@@ -266,18 +291,19 @@ int calc_thread_status(struct jobs_eta *je, int force)
 	static struct timeval rate_prev_time, disp_prev_time;
 
 	if (!force) {
-		if (temp_stall_ts || terse_output || eta_print == FIO_ETA_NEVER)
+		if (output_format != FIO_OUTPUT_NORMAL &&
+		    f_out == stdout)
+			return 0;
+		if (temp_stall_ts || eta_print == FIO_ETA_NEVER)
 			return 0;
 
 		if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))
 			return 0;
 	}
 
-	if (!rate_io_bytes[DDIR_READ] && !rate_io_bytes[DDIR_WRITE] &&
-	    !rate_io_bytes[DDIR_TRIM])
+	if (!ddir_rw_sum(rate_io_bytes))
 		fill_start_time(&rate_prev_time);
-	if (!disp_io_bytes[DDIR_READ] && !disp_io_bytes[DDIR_WRITE] &&
-	    !disp_io_bytes[DDIR_TRIM])
+	if (!ddir_rw_sum(disp_io_bytes))
 		fill_start_time(&disp_prev_time);
 
 	eta_secs = malloc(thread_number * sizeof(unsigned long));
@@ -288,7 +314,9 @@ int calc_thread_status(struct jobs_eta *je, int force)
 	io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
 	io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
 	bw_avg_time = ULONG_MAX;
+	unified_rw_rep = 0;
 	for_each_td(td, i) {
+		unified_rw_rep += td->o.unified_rw_rep;
 		if (is_power_of_2(td->o.kb_base))
 			je->is_pow2 = 1;
 		if (td->o.bw_avg_time < bw_avg_time)
@@ -320,7 +348,9 @@ int calc_thread_status(struct jobs_eta *je, int force)
 		} else if (td->runstate == TD_RAMP) {
 			je->nr_running++;
 			je->nr_ramp++;
-		} else if (td->runstate < TD_RUNNING)
+		} else if (td->runstate == TD_SETTING_UP)
+			je->nr_running++;
+		else if (td->runstate < TD_RUNNING)
 			je->nr_pending++;
 
 		if (je->elapsed_sec >= 3)
@@ -332,9 +362,15 @@ int calc_thread_status(struct jobs_eta *je, int force)
 
 		if (td->runstate > TD_RAMP) {
 			int ddir;
+
 			for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
-				io_bytes[ddir] += td->io_bytes[ddir];
-				io_iops[ddir] += td->io_blocks[ddir];
+				if (unified_rw_rep) {
+					io_bytes[0] += td->io_bytes[ddir];
+					io_iops[0] += td->io_blocks[ddir];
+				} else {
+					io_bytes[ddir] += td->io_bytes[ddir];
+					io_iops[ddir] += td->io_blocks[ddir];
+				}
 			}
 		}
 	}
@@ -360,7 +396,8 @@ int calc_thread_status(struct jobs_eta *je, int force)
 
 	rate_time = mtime_since(&rate_prev_time, &now);
 	if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
-		calc_rate(rate_time, io_bytes, rate_io_bytes, je->rate);
+		calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
+				je->rate);
 		memcpy(&rate_prev_time, &now, sizeof(now));
 		add_agg_sample(je->rate[DDIR_READ], DDIR_READ, 0);
 		add_agg_sample(je->rate[DDIR_WRITE], DDIR_WRITE, 0);
@@ -375,8 +412,8 @@ int calc_thread_status(struct jobs_eta *je, int force)
 	if (!force && disp_time < 900)
 		return 0;
 
-	calc_rate(disp_time, io_bytes, disp_io_bytes, je->rate);
-	calc_iops(disp_time, io_iops, disp_io_iops, je->iops);
+	calc_rate(unified_rw_rep, disp_time, io_bytes, disp_io_bytes, je->rate);
+	calc_iops(unified_rw_rep, disp_time, io_iops, disp_io_iops, je->iops);
 
 	memcpy(&disp_prev_time, &now, sizeof(now));
 
@@ -385,12 +422,13 @@ int calc_thread_status(struct jobs_eta *je, int force)
 	je->nr_threads = thread_number;
 	memcpy(je->run_str, run_str, thread_number * sizeof(char));
 
-
 	return 1;
 }
 
 void display_thread_status(struct jobs_eta *je)
 {
+	static struct timeval disp_eta_new_line;
+	static int eta_new_line_init, eta_new_line_pending;
 	static int linelen_last;
 	static int eta_good;
 	char output[REAL_MAX_JOBS + 512], *p = output;
@@ -402,6 +440,11 @@ void display_thread_status(struct jobs_eta *je)
 		eta_to_str(eta_str, je->eta_sec);
 	}
 
+	if (eta_new_line_pending) {
+		eta_new_line_pending = 0;
+		p += sprintf(p, "\n");
+	}
+
 	p += sprintf(p, "Jobs: %d (f=%d)", je->nr_running, je->files_open);
 	if (je->m_rate || je->t_rate) {
 		char *tr, *mr;
@@ -455,6 +498,16 @@ void display_thread_status(struct jobs_eta *je)
 		p += sprintf(p, "\r");
 
 	printf("%s", output);
+
+	if (!eta_new_line_init) {
+		fio_gettime(&disp_eta_new_line, NULL);
+		eta_new_line_init = 1;
+	} else if (eta_new_line &&
+		   mtime_since_now(&disp_eta_new_line) > eta_new_line * 1000) {
+		fio_gettime(&disp_eta_new_line, NULL);
+		eta_new_line_pending = 1;
+	}
+
 	fflush(stdout);
 }
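
The zone accounting hunk in thread_eta() above replaces the old scaling, bytes_total /= (zone_skip / zone_size), whose integer ratio is zero whenever zone_skip is smaller than zone_size and which ignores the zone size otherwise. The new code counts how many zone_size + zone_skip strides the job steps through and subtracts the skipped bytes from the expected total. A minimal standalone sketch of that arithmetic, with invented sizes (this is an illustration, not the commit's code):

/*
 * Sketch of the zone accounting added to thread_eta() above.
 * zone_size/zone_skip mirror the fio option names; the sample sizes in
 * main() are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t zoned_bytes_total(uint64_t bytes_total, uint64_t zone_size,
				  uint64_t zone_skip)
{
	if (zone_size && zone_skip && bytes_total) {
		/*
		 * Round up to the number of zone_size + zone_skip strides
		 * the job steps through, then drop the skipped bytes: they
		 * are never transferred, so they should not count towards
		 * the ETA.
		 */
		uint64_t stride = zone_size + zone_skip;
		uint64_t nr_zones = (bytes_total + stride - 1) / stride;

		bytes_total -= nr_zones * zone_skip;
	}
	return bytes_total;
}

int main(void)
{
	/*
	 * 1 GiB range, 64 MiB zones, 192 MiB skipped after each zone:
	 * 4 zones are touched, so only 256 MiB is actually transferred.
	 */
	uint64_t total = zoned_bytes_total(1024ULL << 20, 64ULL << 20,
					   192ULL << 20);

	printf("%llu bytes\n", (unsigned long long)total);
	return 0;
}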
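
The verify sizing hunk above also drops the fixed 3/2 assumption for mixed workloads: the verify pass re-reads only what was written, so the expected total grows by the rwmix write share (50% if no split is configured). A small standalone sketch of that arithmetic, with invented numbers (not the commit's code):

/*
 * Sketch of the verify-phase sizing from the thread_eta() hunk above:
 * a verifying write job reads everything back, so the expected byte
 * total grows by the written share.
 */
#include <stdio.h>

static unsigned long long verify_bytes_total(unsigned long long bytes_total,
					     int mixed_rw,
					     unsigned int rwmix_write)
{
	if (mixed_rw) {
		/* Only the written share is re-read; default to 50/50. */
		unsigned int perc = rwmix_write ? rwmix_write : 50;

		bytes_total += (bytes_total * perc) / 100;
	} else {
		/* Pure write job: every byte is written, then read back. */
		bytes_total <<= 1;
	}
	return bytes_total;
}

int main(void)
{
	/* 100 MiB mixed job with rwmixwrite=30 -> 130 MiB expected. */
	printf("%llu bytes\n", verify_bytes_total(100ULL << 20, 1, 30));
	return 0;
}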
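
Several hunks above thread the new unified_rw_rep flag through calc_rate() and calc_iops() so that, with unified reporting enabled, all data directions are accumulated into slot 0 of the output arrays instead of being reported per direction. A self-contained sketch of that folding, using a stand-in DDIR_RWDIR_CNT and invented numbers (an illustration, not the commit's code):

/*
 * Sketch of how the reworked calc_rate() above folds all data
 * directions into slot 0 when unified_rw_rep is set.
 */
#include <stdio.h>

#define DDIR_RWDIR_CNT	3	/* read, write, trim */

static void calc_rate_sketch(int unified_rw_rep, unsigned long mtime,
			     unsigned long long *io_bytes,
			     unsigned long long *prev_io_bytes,
			     unsigned int *rate)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff = io_bytes[i] - prev_io_bytes[i];
		unsigned int this_rate = ((1000 * diff) / mtime) / 1024;

		if (unified_rw_rep) {
			/* One aggregate KiB/s figure, reported in rate[0]. */
			rate[i] = 0;
			rate[0] += this_rate;
		} else
			rate[i] = this_rate;

		prev_io_bytes[i] = io_bytes[i];
	}
}

int main(void)
{
	unsigned long long io[DDIR_RWDIR_CNT] = { 8 << 20, 4 << 20, 0 };
	unsigned long long prev[DDIR_RWDIR_CNT] = { 0, 0, 0 };
	unsigned int rate[DDIR_RWDIR_CNT] = { 0, 0, 0 };

	/*
	 * 8 MiB read plus 4 MiB written over a 1000 ms window: unified
	 * reporting yields 12288 KiB/s in rate[0] and zeroes elsewhere.
	 */
	calc_rate_sketch(1, 1000, io, prev, rate);
	printf("%u %u %u KiB/s\n", rate[0], rate[1], rate[2]);
	return 0;
}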