#ifdef CONFIG_VALGRIND_DEV
#include <valgrind/drd.h>
#else
#define DRD_IGNORE_VAR(x) do { } while (0)
#endif

static char __run_str[REAL_MAX_JOBS + 1];
static char run_str[__THREAD_RUNSTR_SZ(REAL_MAX_JOBS) + 1];

static void update_condensed_str(char *rstr, char *run_str_condensed)
		*run_str_condensed++ = *rstr++;
		while (*(rstr - 1) == *rstr) {
		run_str_condensed += sprintf(run_str_condensed, "(%u),", nr);
	*run_str_condensed = '\0';
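	/*
	 * Worked example (illustrative): with __run_str holding "PPRRRW", the
	 * run-length pass above emits "P(2),R(3),W(1)," into run_str_condensed,
	 * i.e. each state character followed by its repeat count, and finally
	 * a terminating NUL.
	 */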
/*
 * Sets the status of the 'td' in the printed status map.
 */
static void check_str_update(struct thread_data *td)
	char c = __run_str[td->thread_number - 1];

	switch (td->runstate) {
			if (td->o.rwmix[DDIR_READ] == 100)
			else if (td->o.rwmix[DDIR_WRITE] == 100)
			if (td->o.rwmix[DDIR_READ] == 100)
			else if (td->o.rwmix[DDIR_WRITE] == 100)
		} else if (td_read(td)) {
		} else if (td_write(td)) {
		log_err("state %d\n", td->runstate);

	__run_str[td->thread_number - 1] = c;
	update_condensed_str(__run_str, run_str);
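	/*
	 * Example (based on fio's usual run-state map): a running job with
	 * rwmix read=100 is shown with the read character, write=100 with the
	 * write character, and anything in between with the mixed character;
	 * the two parallel rwmix checks above cover the sequential and random
	 * variants of that choice.
	 */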
/*
 * Convert seconds to a printable string.
 */
void eta_to_str(char *str, unsigned long eta_sec)
	unsigned int d, h, m, s;

		str += sprintf(str, "%02ud:", d);

		str += sprintf(str, "%02uh:", h);

	str += sprintf(str, "%02um:", m);
	sprintf(str, "%02us", s);
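	/*
	 * Worked example: eta_sec = 3725 splits into d=0, h=1, m=2, s=5, so
	 * the printed string is "01h:02m:05s"; the day and hour fields are
	 * only emitted when they carry information.
	 */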
/*
 * Best effort calculation of the estimated pending runtime of a job.
 */
static unsigned long thread_eta(struct thread_data *td)
	unsigned long long bytes_total, bytes_done;
	unsigned long eta_sec = 0;
	unsigned long elapsed;

	elapsed = (mtime_since_now(&td->epoch) + 999) / 1000;
	timeout = td->o.timeout / 1000000UL;

	bytes_total = td->total_io_size;

	if (td->flags & TD_F_NO_PROGRESS)

	if (td->o.fill_device && td->o.size == -1ULL) {
		if (!td->fill_device_size || td->fill_device_size == -1ULL)

		bytes_total = td->fill_device_size;

	/*
	 * If io_size is set, bytes_total is an exact value that does not need
	 * adjustment.
	 */
	if (td->o.zone_size && td->o.zone_skip && bytes_total &&
	    !fio_option_is_set(&td->o, io_size)) {
		unsigned int nr_zones;
		uint64_t zone_bytes;

		/*
		 * Calculate the upper bound of the number of zones that will
		 * be processed, including skipped bytes between zones. If this
		 * is larger than total_io_size (e.g. when --io_size or --size
		 * specify a small value), use the lower bound to avoid
		 * adjustments to a negative value that would result in a very
		 * large bytes_total and an incorrect eta.
		 */
		zone_bytes = td->o.zone_size + td->o.zone_skip;
		nr_zones = (bytes_total + zone_bytes - 1) / zone_bytes;
		if (bytes_total < nr_zones * td->o.zone_skip)
			nr_zones = bytes_total / zone_bytes;
		bytes_total -= nr_zones * td->o.zone_skip;
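	/*
	 * Worked example (illustrative sizes): with zone_size = 64M and
	 * zone_skip = 192M, zone_bytes is 256M. For bytes_total = 1024M this
	 * gives nr_zones = 4 and subtracts 4 * 192M = 768M of skipped bytes,
	 * leaving 256M of actual I/O. For a small bytes_total of 100M the
	 * upper bound (1 zone) would subtract more than bytes_total itself,
	 * so the lower bound (0 zones) is used and bytes_total is unchanged.
	 */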
	/*
	 * if writing and verifying afterwards, bytes_total will be twice the
	 * size. In a mixed workload, verify phase will be the size of the
	 * first stage writes.
	 */
	if (td->o.do_verify && td->o.verify && td_write(td)) {
		unsigned int perc = 50;

		if (td->o.rwmix[DDIR_WRITE])
			perc = td->o.rwmix[DDIR_WRITE];

		bytes_total += (bytes_total * perc) / 100;
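	/*
	 * Worked example: with perc = 50, a 10G bytes_total grows to 15G
	 * (10G of writes plus 5G expected to be read back during verify);
	 * with rwmix write=100 it doubles to 20G.
	 */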
	if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {

		bytes_done = ddir_rw_sum(td->io_bytes);

		perc = (double) bytes_done / (double) bytes_total;

		if (td->o.time_based) {
			perc_t = (double) elapsed / (double) timeout;

			/*
			 * Will never hit, we can't have time_based
			 * without a timeout set.
			 */

		eta_sec = (unsigned long) (elapsed * (1.0 / perc)) - elapsed;

		    eta_sec > (timeout + done_secs - elapsed))
			eta_sec = timeout + done_secs - elapsed;
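	/*
	 * Worked example: if 25% of bytes_total is done after elapsed = 30s,
	 * perc = 0.25 and eta_sec = 30 * (1 / 0.25) - 30 = 90s. The clamp
	 * above then keeps the estimate from exceeding what the configured
	 * runtime still allows.
	 */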
	} else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
			|| td->runstate == TD_INITIALIZED
			|| td->runstate == TD_SETTING_UP
			|| td->runstate == TD_RAMP
			|| td->runstate == TD_PRE_READING) {
		int64_t t_eta = 0, r_eta = 0;
		unsigned long long rate_bytes;

		/*
		 * We can only guess - assume it'll run the full timeout
		 * if given, otherwise assume it'll run at the specified rate.
		 */
			uint64_t __timeout = td->o.timeout;
			uint64_t start_delay = td->o.start_delay;
			uint64_t ramp_time = td->o.ramp_time;

			t_eta = __timeout + start_delay;
			if (!td->ramp_time_over) {

			if ((td->runstate == TD_RAMP) && in_ramp_time(td)) {
				unsigned long ramp_left;

				ramp_left = mtime_since_now(&td->epoch);
				ramp_left = (ramp_left + 999) / 1000;
				if (ramp_left <= t_eta)

			rate_bytes = td->o.rate[DDIR_READ];
			rate_bytes += td->o.rate[DDIR_WRITE];
			rate_bytes += td->o.rate[DDIR_TRIM];

			r_eta = bytes_total / rate_bytes;
			r_eta += (td->o.start_delay / 1000000ULL);

			eta_sec = min(r_eta, t_eta);
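	/*
	 * Worked example (illustrative): with a 60s runtime, no start delay,
	 * a 100M/s rate cap and bytes_total = 10G, t_eta is 60 while r_eta is
	 * roughly 10G / 100M/s ~= 102s, so the reported estimate is
	 * min(102, 60) = 60 seconds.
	 */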
		/*
		 * thread is already done or waiting for fsync
		 */

static void calc_rate(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_bytes,
		      unsigned long long *prev_io_bytes, uint64_t *rate)
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff, this_rate;

		diff = io_bytes[i] - prev_io_bytes[i];
		this_rate = ((1000 * diff) / mtime) / 1024; /* KiB/s */

		if (unified_rw_rep == UNIFIED_MIXED) {
			rate[0] += this_rate;

		prev_io_bytes[i] = io_bytes[i];
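	/*
	 * Worked example: diff = 52,428,800 bytes over mtime = 500ms gives
	 * (1000 * 52428800 / 500) / 1024 = 102,400 KiB/s, i.e. 100 MiB/s.
	 */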
static void calc_iops(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_iops,
		      unsigned long long *prev_io_iops, unsigned int *iops)
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff, this_iops;

		diff = io_iops[i] - prev_io_iops[i];
		this_iops = (diff * 1000) / mtime;

		if (unified_rw_rep == UNIFIED_MIXED) {
			iops[0] += this_iops;

		prev_io_iops[i] = io_iops[i];
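	/*
	 * Worked example: diff = 2048 completions over mtime = 500ms gives
	 * (2048 * 1000) / 500 = 4096 IOPS.
	 */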
/*
 * Allow a little slack - if we're within 95% of the time, allow ETA.
 */
bool eta_time_within_slack(unsigned int time)
	return time > ((eta_interval_msec * 95) / 100);
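	/*
	 * Example: assuming eta_interval_msec is left at its default of 1000,
	 * any display interval longer than 950ms still counts as on time and
	 * the ETA is refreshed.
	 */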
/*
 * Print status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
bool calc_thread_status(struct jobs_eta *je, int force)
	struct thread_data *td;
	int i, unified_rw_rep;
	uint64_t rate_time, disp_time, bw_avg_time, *eta_secs;
	unsigned long long io_bytes[DDIR_RWDIR_CNT] = {};
	unsigned long long io_iops[DDIR_RWDIR_CNT] = {};
	static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_iops[DDIR_RWDIR_CNT];
	static struct timespec rate_prev_time, disp_prev_time;

	if (!(output_format & FIO_OUTPUT_NORMAL) &&

	if (temp_stall_ts || eta_print == FIO_ETA_NEVER)

	if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))

	if (!ddir_rw_sum(rate_io_bytes))
		fill_start_time(&rate_prev_time);
	if (!ddir_rw_sum(disp_io_bytes))
		fill_start_time(&disp_prev_time);

	eta_secs = malloc(thread_number * sizeof(uint64_t));
	memset(eta_secs, 0, thread_number * sizeof(uint64_t));

	je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;

	bw_avg_time = ULONG_MAX;
		unified_rw_rep += td->o.unified_rw_rep;
		if (is_power_of_2(td->o.kb_base))
		je->unit_base = td->o.unit_base;
		je->sig_figs = td->o.sig_figs;
		if (td->o.bw_avg_time < bw_avg_time)
			bw_avg_time = td->o.bw_avg_time;
		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
				|| td->runstate == TD_FSYNCING
				|| td->runstate == TD_PRE_READING
				|| td->runstate == TD_FINISHING) {
				je->t_rate[0] += td->o.rate[DDIR_READ];
				je->t_iops[0] += td->o.rate_iops[DDIR_READ];
				je->m_rate[0] += td->o.ratemin[DDIR_READ];
				je->m_iops[0] += td->o.rate_iops_min[DDIR_READ];
				je->t_rate[1] += td->o.rate[DDIR_WRITE];
				je->t_iops[1] += td->o.rate_iops[DDIR_WRITE];
				je->m_rate[1] += td->o.ratemin[DDIR_WRITE];
				je->m_iops[1] += td->o.rate_iops_min[DDIR_WRITE];
				je->t_rate[2] += td->o.rate[DDIR_TRIM];
				je->t_iops[2] += td->o.rate_iops[DDIR_TRIM];
				je->m_rate[2] += td->o.ratemin[DDIR_TRIM];
				je->m_iops[2] += td->o.rate_iops_min[DDIR_TRIM];
			je->files_open += td->nr_open_files;
		} else if (td->runstate == TD_RAMP) {
		} else if (td->runstate == TD_SETTING_UP)
		else if (td->runstate < TD_RUNNING)

		if (je->elapsed_sec >= 3)
			eta_secs[i] = thread_eta(td);
			eta_secs[i] = INT_MAX;

		check_str_update(td);

		if (td->runstate > TD_SETTING_UP) {
			for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
				if (unified_rw_rep) {
					io_bytes[0] += td->io_bytes[ddir];
					io_iops[0] += td->io_blocks[ddir];
					io_bytes[ddir] += td->io_bytes[ddir];
					io_iops[ddir] += td->io_blocks[ddir];

	if (exitall_on_terminate) {
		je->eta_sec = INT_MAX;
			if (eta_secs[i] < je->eta_sec)
				je->eta_sec = eta_secs[i];
		unsigned long eta_stone = 0;

			if ((td->runstate == TD_NOT_CREATED) && td->o.stonewall)
				eta_stone += eta_secs[i];
			if (eta_secs[i] > je->eta_sec)
				je->eta_sec = eta_secs[i];
		je->eta_sec += eta_stone;
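	/*
	 * Example: with per-job estimates of 120s and 300s,
	 * exitall_on_terminate reports the minimum (120s, since the whole run
	 * stops when the first job finishes), while the default path reports
	 * the maximum (300s); a stonewalled job that has not been created yet
	 * adds its own estimate on top of that.
	 */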
	fio_gettime(&now, NULL);
	rate_time = mtime_since(&rate_prev_time, &now);

	if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
		calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
		memcpy(&rate_prev_time, &now, sizeof(now));

		for_each_rw_ddir(ddir) {
			add_agg_sample(sample_val(je->rate[ddir]), ddir, 0);

	disp_time = mtime_since(&disp_prev_time, &now);

	if (!force && !eta_time_within_slack(disp_time))

	calc_rate(unified_rw_rep, disp_time, io_bytes, disp_io_bytes, je->rate);
	calc_iops(unified_rw_rep, disp_time, io_iops, disp_io_iops, je->iops);

	memcpy(&disp_prev_time, &now, sizeof(now));

	if (!force && !je->nr_running && !je->nr_pending)

	je->nr_threads = thread_number;
	update_condensed_str(__run_str, run_str);
	memcpy(je->run_str, run_str, strlen(run_str));

static int gen_eta_str(struct jobs_eta *je, char *p, size_t left,
		       char **rate_str, char **iops_str)
	static const char c[DDIR_RWDIR_CNT] = {'r', 'w', 't'};
	bool has[DDIR_RWDIR_CNT];
	bool has_any = false;

	for_each_rw_ddir(ddir) {
		has[ddir] = (je->rate[ddir] || je->iops[ddir]);
		has_any |= has[ddir];

	l += snprintf(p + l, left - l, "[");
	for_each_rw_ddir(ddir) {
			l += snprintf(p + l, left - l, "%s%c=%s",
					sep, c[ddir], rate_str[ddir]);
	l += snprintf(p + l, left - l, "][");
	for_each_rw_ddir(ddir) {
			l += snprintf(p + l, left - l, "%s%c=%s",
					sep, c[ddir], iops_str[ddir]);
	l += snprintf(p + l, left - l, " IOPS]");
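	/*
	 * Example of the fragment assembled here (illustrative values):
	 * "[r=98.4MiB/s,w=98.1MiB/s][r=25.2k,w=25.1k IOPS]" - only the
	 * directions that currently have a non-zero rate or IOPS figure are
	 * included.
	 */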
void display_thread_status(struct jobs_eta *je)
	static struct timespec disp_eta_new_line;
	static int eta_new_line_init, eta_new_line_pending;
	static int linelen_last;
	char output[__THREAD_RUNSTR_SZ(REAL_MAX_JOBS) + 512], *p = output;

	if (je->eta_sec != INT_MAX && je->elapsed_sec) {
		perc = (double) je->elapsed_sec / (double) (je->elapsed_sec + je->eta_sec);
		eta_to_str(eta_str, je->eta_sec);

	if (eta_new_line_pending) {
		eta_new_line_pending = 0;
		p += sprintf(p, "\n");

	p += sprintf(p, "Jobs: %d (f=%d)", je->nr_running, je->files_open);

	/* rate limits, if any */
	if (je->m_rate[0] || je->m_rate[1] || je->m_rate[2] ||
	    je->t_rate[0] || je->t_rate[1] || je->t_rate[2]) {

		mr = num2str(je->m_rate[0] + je->m_rate[1] + je->m_rate[2],
				je->sig_figs, 1, je->is_pow2, N2S_BYTEPERSEC);
		tr = num2str(je->t_rate[0] + je->t_rate[1] + je->t_rate[2],
				je->sig_figs, 1, je->is_pow2, N2S_BYTEPERSEC);

		p += sprintf(p, ", %s-%s", mr, tr);
	} else if (je->m_iops[0] || je->m_iops[1] || je->m_iops[2] ||
		   je->t_iops[0] || je->t_iops[1] || je->t_iops[2]) {
		p += sprintf(p, ", %d-%d IOPS",
				je->m_iops[0] + je->m_iops[1] + je->m_iops[2],
				je->t_iops[0] + je->t_iops[1] + je->t_iops[2]);

	/* current run string, % done, bandwidth, iops, eta */
	if (je->eta_sec != INT_MAX && je->nr_running) {
		char *iops_str[DDIR_RWDIR_CNT];
		char *rate_str[DDIR_RWDIR_CNT];

		if ((!je->eta_sec && !eta_good) || je->nr_ramp == je->nr_running ||
			strcpy(perc_str, "-.-%");

			if (je->nr_setting_up && je->nr_running)
				mult *= (1.0 - (double) je->nr_setting_up / (double) je->nr_running);

			sprintf(perc_str, "%3.1f%%", perc);

		for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
			rate_str[ddir] = num2str(je->rate[ddir], 4,
						1024, je->is_pow2, je->unit_base);
			iops_str[ddir] = num2str(je->iops[ddir], 4, 1, 0, N2S_NONE);

		left = sizeof(output) - (p - output) - 1;
		l = snprintf(p, left, ": [%s][%s]", je->run_str, perc_str);
		l += gen_eta_str(je, p + l, left - l, rate_str, iops_str);
		l += snprintf(p + l, left - l, "[eta %s]", eta_str);

		/* If truncation occurred adjust l so p is on the null */

		linelen = p - output;
		if (l >= 0 && linelen < linelen_last)
			p += sprintf(p, "%*s", linelen_last - linelen, "");
		linelen_last = linelen;

		for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
			free(rate_str[ddir]);
			free(iops_str[ddir]);

	printf("%s", output);
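	/*
	 * A fully assembled status line typically looks like this
	 * (illustrative values):
	 * "Jobs: 2 (f=2): [M(2)][45.0%][r=98.4MiB/s,w=98.1MiB/s][r=25.2k,w=25.1k IOPS][eta 01m:05s]"
	 */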
	if (!eta_new_line_init) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_init = 1;
	} else if (eta_new_line && mtime_since_now(&disp_eta_new_line) > eta_new_line) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_pending = 1;

struct jobs_eta *get_jobs_eta(bool force, size_t *size)
	*size = sizeof(*je) + THREAD_RUNSTR_SZ + 8;

	memset(je, 0, *size);

	if (!calc_thread_status(je, force)) {

	*size = sizeof(*je) + strlen((char *) je->run_str) + 1;

void print_thread_status(void)
	je = get_jobs_eta(false, &size);
		display_thread_status(je);

void print_status_init(int thr_number)
	struct jobs_eta_packed jep;

	compiletime_assert(sizeof(struct jobs_eta) == sizeof(jep), "jobs_eta");

	DRD_IGNORE_VAR(__run_str);
	__run_str[thr_number] = 'P';
	update_condensed_str(__run_str, run_str);
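	/*
	 * Example: as each of four jobs is initialized its slot in __run_str
	 * is set to 'P' (created, not yet started), so __run_str reads "PPPP"
	 * and the condensed string becomes "P(4),".
	 */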