optgroup: move debug code into function
[fio.git] / eta.c
/*
 * Status and ETA code
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#ifdef CONFIG_VALGRIND_DEV
#include <valgrind/drd.h>
#else
#define DRD_IGNORE_VAR(x) do { } while (0)
#endif

#include "fio.h"
#include "lib/pow2.h"

static char __run_str[REAL_MAX_JOBS + 1];
static char run_str[__THREAD_RUNSTR_SZ(REAL_MAX_JOBS) + 1];

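/*
 * Condense the run string with run-length encoding: each state character
 * is followed by its repeat count, so e.g. "RRR_" becomes "R(3),_(1)".
 * This keeps the status line short when many jobs share a state.
 */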
static void update_condensed_str(char *rstr, char *run_str_condensed)
{
	if (*rstr) {
		while (*rstr) {
			int nr = 1;

			*run_str_condensed++ = *rstr++;
			while (*(rstr - 1) == *rstr) {
				rstr++;
				nr++;
			}
			run_str_condensed += sprintf(run_str_condensed, "(%u),", nr);
		}
		run_str_condensed--;
	}
	*run_str_condensed = '\0';
}

/*
 * Sets the status of the 'td' in the printed status map.
 */
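/*
 * One character per job in the (uncondensed) run string. Roughly:
 * uppercase means sequential I/O and lowercase random ('R'/'r' read,
 * 'W'/'w' write, 'M'/'m' mixed, 'D'/'d' trim), with the remaining
 * letters covering lifecycle states as assigned below.
 */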
static void check_str_update(struct thread_data *td)
{
	char c = __run_str[td->thread_number - 1];

	switch (td->runstate) {
	case TD_REAPED:
		if (td->error)
			c = 'X';
		else if (td->sig)
			c = 'K';
		else
			c = '_';
		break;
	case TD_EXITED:
		c = 'E';
		break;
	case TD_RAMP:
		c = '/';
		break;
	case TD_RUNNING:
		if (td_rw(td)) {
			if (td_random(td)) {
				if (td->o.rwmix[DDIR_READ] == 100)
					c = 'r';
				else if (td->o.rwmix[DDIR_WRITE] == 100)
					c = 'w';
				else
					c = 'm';
			} else {
				if (td->o.rwmix[DDIR_READ] == 100)
					c = 'R';
				else if (td->o.rwmix[DDIR_WRITE] == 100)
					c = 'W';
				else
					c = 'M';
			}
		} else if (td_read(td)) {
			if (td_random(td))
				c = 'r';
			else
				c = 'R';
		} else if (td_write(td)) {
			if (td_random(td))
				c = 'w';
			else
				c = 'W';
		} else {
			if (td_random(td))
				c = 'd';
			else
				c = 'D';
		}
		break;
	case TD_PRE_READING:
		c = 'p';
		break;
	case TD_VERIFYING:
		c = 'V';
		break;
	case TD_FSYNCING:
		c = 'F';
		break;
	case TD_FINISHING:
		c = 'f';
		break;
	case TD_CREATED:
		c = 'C';
		break;
	case TD_INITIALIZED:
	case TD_SETTING_UP:
		c = 'I';
		break;
	case TD_NOT_CREATED:
		c = 'P';
		break;
	default:
		log_err("state %d\n", td->runstate);
	}

	__run_str[td->thread_number - 1] = c;
	update_condensed_str(__run_str, run_str);
}

/*
 * Convert seconds to a printable string.
 */
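/*
 * For example, eta_sec == 90061 (1 day, 1 hour, 1 minute, 1 second)
 * renders as "01d:01h:01m:01s"; days and hours are printed only when
 * needed, and an unknown ETA (-1) renders as "--".
 */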
void eta_to_str(char *str, unsigned long eta_sec)
{
	unsigned int d, h, m, s;
	int disp_hour = 0;

	if (eta_sec == -1) {
		sprintf(str, "--");
		return;
	}

	s = eta_sec % 60;
	eta_sec /= 60;
	m = eta_sec % 60;
	eta_sec /= 60;
	h = eta_sec % 24;
	eta_sec /= 24;
	d = eta_sec;

	if (d) {
		disp_hour = 1;
		str += sprintf(str, "%02ud:", d);
	}

	if (h || disp_hour)
		str += sprintf(str, "%02uh:", h);

	str += sprintf(str, "%02um:", m);
	str += sprintf(str, "%02us", s);
}

/*
 * Best effort calculation of the estimated pending runtime of a job.
 */
static unsigned long thread_eta(struct thread_data *td)
{
	unsigned long long bytes_total, bytes_done;
	unsigned long eta_sec = 0;
	unsigned long elapsed;
	uint64_t timeout;

	elapsed = (mtime_since_now(&td->epoch) + 999) / 1000;
	timeout = td->o.timeout / 1000000UL;

	bytes_total = td->total_io_size;

	if (td->flags & TD_F_NO_PROGRESS)
		return -1;

	if (td->o.fill_device && td->o.size == -1ULL) {
		if (!td->fill_device_size || td->fill_device_size == -1ULL)
			return 0;

		bytes_total = td->fill_device_size;
	}

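	/*
	 * With zoned I/O, only zone_size bytes out of each
	 * zone_size + zone_skip stride are actually transferred, so
	 * deduct the skipped ranges from the projected total.
	 */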
	if (td->o.zone_size && td->o.zone_skip && bytes_total) {
		unsigned int nr_zones;
		uint64_t zone_bytes;

		zone_bytes = bytes_total + td->o.zone_size + td->o.zone_skip;
		nr_zones = (zone_bytes - 1) / (td->o.zone_size + td->o.zone_skip);
		bytes_total -= nr_zones * td->o.zone_skip;
	}

	/*
	 * If writing and verifying afterwards, bytes_total will be twice the
	 * size. In a mixed workload, the verify phase will be the size of the
	 * first-stage writes.
	 */
	if (td->o.do_verify && td->o.verify && td_write(td)) {
		if (td_rw(td)) {
			unsigned int perc = 50;

			if (td->o.rwmix[DDIR_WRITE])
				perc = td->o.rwmix[DDIR_WRITE];

			bytes_total += (bytes_total * perc) / 100;
		} else
			bytes_total <<= 1;
	}

	if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
		double perc, perc_t;

		bytes_done = ddir_rw_sum(td->io_bytes);

		if (bytes_total) {
			perc = (double) bytes_done / (double) bytes_total;
			if (perc > 1.0)
				perc = 1.0;
		} else
			perc = 0.0;

		if (td->o.time_based) {
			if (timeout) {
				perc_t = (double) elapsed / (double) timeout;
				if (perc_t < perc)
					perc = perc_t;
			} else {
				/*
				 * Never hit: time_based cannot be set
				 * without a timeout.
				 */
				perc = 0.0;
			}
		}

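		/*
		 * If a fraction perc of the work consumed 'elapsed' seconds,
		 * the projected total runtime is elapsed / perc, leaving
		 * elapsed / perc - elapsed to go. E.g. 25% done after 30s
		 * projects 120s total, i.e. 90s remaining.
		 */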
		if (perc == 0.0) {
			eta_sec = timeout;
		} else {
			eta_sec = (unsigned long) (elapsed * (1.0 / perc)) - elapsed;
		}

		if (td->o.timeout &&
		    eta_sec > (timeout + done_secs - elapsed))
			eta_sec = timeout + done_secs - elapsed;
	} else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
			|| td->runstate == TD_INITIALIZED
			|| td->runstate == TD_SETTING_UP
			|| td->runstate == TD_RAMP
			|| td->runstate == TD_PRE_READING) {
		int64_t t_eta = 0, r_eta = 0;
		unsigned long long rate_bytes;

		/*
		 * We can only guess - assume it'll run the full timeout
		 * if given, otherwise assume it'll run at the specified rate.
		 */
		if (td->o.timeout) {
			uint64_t __timeout = td->o.timeout;
			uint64_t start_delay = td->o.start_delay;
			uint64_t ramp_time = td->o.ramp_time;

			t_eta = __timeout + start_delay;
			if (!td->ramp_time_over) {
				t_eta += ramp_time;
			}
			t_eta /= 1000000ULL;

			if ((td->runstate == TD_RAMP) && in_ramp_time(td)) {
				unsigned long ramp_left;

				ramp_left = mtime_since_now(&td->epoch);
				ramp_left = (ramp_left + 999) / 1000;
				if (ramp_left <= t_eta)
					t_eta -= ramp_left;
			}
		}
		rate_bytes = 0;
		if (td_read(td))
			rate_bytes = td->o.rate[DDIR_READ];
		if (td_write(td))
			rate_bytes += td->o.rate[DDIR_WRITE];
		if (td_trim(td))
			rate_bytes += td->o.rate[DDIR_TRIM];

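		/*
		 * The rate option is in bytes per second, so dividing the
		 * projected total by it yields seconds.
		 */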
		if (rate_bytes) {
			r_eta = bytes_total / rate_bytes;
			r_eta += (td->o.start_delay / 1000000ULL);
		}

		if (r_eta && t_eta)
			eta_sec = min(r_eta, t_eta);
		else if (r_eta)
			eta_sec = r_eta;
		else if (t_eta)
			eta_sec = t_eta;
		else
			eta_sec = 0;
	} else {
		/*
		 * thread is already done or waiting for fsync
		 */
		eta_sec = 0;
	}

	return eta_sec;
}

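/*
 * Per-direction bandwidth over the last interval, in KiB/s:
 * (bytes_delta * 1000 / interval_msec) / 1024. With unified_rw_rep,
 * all directions are folded into slot 0.
 */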
static void calc_rate(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_bytes,
		      unsigned long long *prev_io_bytes, uint64_t *rate)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff, this_rate;

		diff = io_bytes[i] - prev_io_bytes[i];
		if (mtime)
			this_rate = ((1000 * diff) / mtime) / 1024; /* KiB/s */
		else
			this_rate = 0;

		if (unified_rw_rep) {
			rate[i] = 0;
			rate[0] += this_rate;
		} else
			rate[i] = this_rate;

		prev_io_bytes[i] = io_bytes[i];
	}
}

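/*
 * Same idea for IOPS: completions_delta * 1000 / interval_msec, again
 * folded into slot 0 when unified reporting is requested.
 */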
static void calc_iops(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_iops,
		      unsigned long long *prev_io_iops, unsigned int *iops)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff, this_iops;

		diff = io_iops[i] - prev_io_iops[i];
		if (mtime)
			this_iops = (diff * 1000) / mtime;
		else
			this_iops = 0;

		if (unified_rw_rep) {
			iops[i] = 0;
			iops[0] += this_iops;
		} else
			iops[i] = this_iops;

		prev_io_iops[i] = io_iops[i];
	}
}

/*
 * Allow a little slack - if we're within 95% of the time, allow ETA.
 * With the default 1000 msec ETA interval, that means anything above
 * 950 msec qualifies.
 */
bool eta_time_within_slack(unsigned int time)
{
	return time > ((eta_interval_msec * 95) / 100);
}


/*
 * Print status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
bool calc_thread_status(struct jobs_eta *je, int force)
{
	struct thread_data *td;
	int i, unified_rw_rep;
	uint64_t rate_time, disp_time, bw_avg_time, *eta_secs;
	unsigned long long io_bytes[DDIR_RWDIR_CNT];
	unsigned long long io_iops[DDIR_RWDIR_CNT];
	struct timespec now;

	static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_iops[DDIR_RWDIR_CNT];
	static struct timespec rate_prev_time, disp_prev_time;

	if (!force) {
		if (!(output_format & FIO_OUTPUT_NORMAL) &&
		    f_out == stdout)
			return false;
		if (temp_stall_ts || eta_print == FIO_ETA_NEVER)
			return false;

		if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))
			return false;
	}

	if (!ddir_rw_sum(rate_io_bytes))
		fill_start_time(&rate_prev_time);
	if (!ddir_rw_sum(disp_io_bytes))
		fill_start_time(&disp_prev_time);

	eta_secs = malloc(thread_number * sizeof(uint64_t));
	memset(eta_secs, 0, thread_number * sizeof(uint64_t));

	je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;

	io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
	io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
	bw_avg_time = ULONG_MAX;
	unified_rw_rep = 0;
	for_each_td(td, i) {
		unified_rw_rep += td->o.unified_rw_rep;
		if (is_power_of_2(td->o.kb_base))
			je->is_pow2 = 1;
		je->unit_base = td->o.unit_base;
		if (td->o.bw_avg_time < bw_avg_time)
			bw_avg_time = td->o.bw_avg_time;
		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
		    || td->runstate == TD_FSYNCING
		    || td->runstate == TD_PRE_READING
		    || td->runstate == TD_FINISHING) {
			je->nr_running++;
			if (td_read(td)) {
				je->t_rate[0] += td->o.rate[DDIR_READ];
				je->t_iops[0] += td->o.rate_iops[DDIR_READ];
				je->m_rate[0] += td->o.ratemin[DDIR_READ];
				je->m_iops[0] += td->o.rate_iops_min[DDIR_READ];
			}
			if (td_write(td)) {
				je->t_rate[1] += td->o.rate[DDIR_WRITE];
				je->t_iops[1] += td->o.rate_iops[DDIR_WRITE];
				je->m_rate[1] += td->o.ratemin[DDIR_WRITE];
				je->m_iops[1] += td->o.rate_iops_min[DDIR_WRITE];
			}
			if (td_trim(td)) {
				je->t_rate[2] += td->o.rate[DDIR_TRIM];
				je->t_iops[2] += td->o.rate_iops[DDIR_TRIM];
				je->m_rate[2] += td->o.ratemin[DDIR_TRIM];
				je->m_iops[2] += td->o.rate_iops_min[DDIR_TRIM];
			}

			je->files_open += td->nr_open_files;
		} else if (td->runstate == TD_RAMP) {
			je->nr_running++;
			je->nr_ramp++;
		} else if (td->runstate == TD_SETTING_UP)
			je->nr_setting_up++;
		else if (td->runstate < TD_RUNNING)
			je->nr_pending++;

		if (je->elapsed_sec >= 3)
			eta_secs[i] = thread_eta(td);
		else
			eta_secs[i] = INT_MAX;

		check_str_update(td);

		if (td->runstate > TD_SETTING_UP) {
			int ddir;

			for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
				if (unified_rw_rep) {
					io_bytes[0] += td->io_bytes[ddir];
					io_iops[0] += td->io_blocks[ddir];
				} else {
					io_bytes[ddir] += td->io_bytes[ddir];
					io_iops[ddir] += td->io_blocks[ddir];
				}
			}
		}
	}

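	/*
	 * Fold the per-thread estimates into one number. With
	 * exitall_on_terminate the run ends when the first job finishes,
	 * so take the minimum; otherwise concurrent jobs overlap (max)
	 * while stonewalled jobs run back to back (sum).
	 */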
	if (exitall_on_terminate) {
		je->eta_sec = INT_MAX;
		for_each_td(td, i) {
			if (eta_secs[i] < je->eta_sec)
				je->eta_sec = eta_secs[i];
		}
	} else {
		unsigned long eta_stone = 0;

		je->eta_sec = 0;
		for_each_td(td, i) {
			if ((td->runstate == TD_NOT_CREATED) && td->o.stonewall)
				eta_stone += eta_secs[i];
			else {
				if (eta_secs[i] > je->eta_sec)
					je->eta_sec = eta_secs[i];
			}
		}
		je->eta_sec += eta_stone;
	}

	free(eta_secs);

	fio_gettime(&now, NULL);
	rate_time = mtime_since(&rate_prev_time, &now);

	if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
		calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
			  je->rate);
		memcpy(&rate_prev_time, &now, sizeof(now));
		add_agg_sample(sample_val(je->rate[DDIR_READ]), DDIR_READ, 0);
		add_agg_sample(sample_val(je->rate[DDIR_WRITE]), DDIR_WRITE, 0);
		add_agg_sample(sample_val(je->rate[DDIR_TRIM]), DDIR_TRIM, 0);
	}

	disp_time = mtime_since(&disp_prev_time, &now);

	if (!force && !eta_time_within_slack(disp_time))
		return false;

	calc_rate(unified_rw_rep, disp_time, io_bytes, disp_io_bytes, je->rate);
	calc_iops(unified_rw_rep, disp_time, io_iops, disp_io_iops, je->iops);

	memcpy(&disp_prev_time, &now, sizeof(now));

	if (!force && !je->nr_running && !je->nr_pending)
		return false;

	je->nr_threads = thread_number;
	update_condensed_str(__run_str, run_str);
	memcpy(je->run_str, run_str, strlen(run_str));
	return true;
}

void display_thread_status(struct jobs_eta *je)
{
	static struct timespec disp_eta_new_line;
	static int eta_new_line_init, eta_new_line_pending;
	static int linelen_last;
	static int eta_good;
	char output[__THREAD_RUNSTR_SZ(REAL_MAX_JOBS) + 512], *p = output;
	char eta_str[128];
	double perc = 0.0;

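	/*
	 * Overall progress estimate: elapsed / (elapsed + eta). E.g. 60s
	 * in with 180s still to go reports 25%.
	 */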
	if (je->eta_sec != INT_MAX && je->elapsed_sec) {
		perc = (double) je->elapsed_sec / (double) (je->elapsed_sec + je->eta_sec);
		eta_to_str(eta_str, je->eta_sec);
	}

	if (eta_new_line_pending) {
		eta_new_line_pending = 0;
		linelen_last = 0;
		p += sprintf(p, "\n");
	}

	p += sprintf(p, "Jobs: %d (f=%d)", je->nr_running, je->files_open);

	/* rate limits, if any */
	if (je->m_rate[0] || je->m_rate[1] || je->m_rate[2] ||
	    je->t_rate[0] || je->t_rate[1] || je->t_rate[2]) {
		char *tr, *mr;

		mr = num2str(je->m_rate[0] + je->m_rate[1] + je->m_rate[2],
			     je->sig_figs, 0, je->is_pow2, N2S_BYTEPERSEC);
		tr = num2str(je->t_rate[0] + je->t_rate[1] + je->t_rate[2],
			     je->sig_figs, 0, je->is_pow2, N2S_BYTEPERSEC);

		p += sprintf(p, ", %s-%s", mr, tr);
		free(tr);
		free(mr);
	} else if (je->m_iops[0] || je->m_iops[1] || je->m_iops[2] ||
		   je->t_iops[0] || je->t_iops[1] || je->t_iops[2]) {
		p += sprintf(p, ", %d-%d IOPS",
			     je->m_iops[0] + je->m_iops[1] + je->m_iops[2],
			     je->t_iops[0] + je->t_iops[1] + je->t_iops[2]);
	}

	/* current run string, % done, bandwidth, iops, eta */
	if (je->eta_sec != INT_MAX && je->nr_running) {
		char perc_str[32];
		char *iops_str[DDIR_RWDIR_CNT];
		char *rate_str[DDIR_RWDIR_CNT];
		size_t left;
		int l;
		int ddir;
		int linelen;

		if ((!je->eta_sec && !eta_good) || je->nr_ramp == je->nr_running ||
		    je->eta_sec == -1)
			strcpy(perc_str, "-.-%");
		else {
			double mult = 100.0;

			if (je->nr_setting_up && je->nr_running)
				mult *= (1.0 - (double) je->nr_setting_up / (double) je->nr_running);

			eta_good = 1;
			perc *= mult;
			sprintf(perc_str, "%3.1f%%", perc);
		}

		for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
			rate_str[ddir] = num2str(je->rate[ddir], 4,
						 1024, je->is_pow2, je->unit_base);
			iops_str[ddir] = num2str(je->iops[ddir], 4, 1, 0, N2S_NONE);
		}

		left = sizeof(output) - (p - output) - 1;

		if (je->rate[DDIR_TRIM] || je->iops[DDIR_TRIM])
			l = snprintf(p, left,
				": [%s][%s][r=%s,w=%s,t=%s][r=%s,w=%s,t=%s IOPS][eta %s]",
				je->run_str, perc_str, rate_str[DDIR_READ],
				rate_str[DDIR_WRITE], rate_str[DDIR_TRIM],
				iops_str[DDIR_READ], iops_str[DDIR_WRITE],
				iops_str[DDIR_TRIM], eta_str);
		else
			l = snprintf(p, left,
				": [%s][%s][r=%s,w=%s][r=%s,w=%s IOPS][eta %s]",
				je->run_str, perc_str,
				rate_str[DDIR_READ], rate_str[DDIR_WRITE],
				iops_str[DDIR_READ], iops_str[DDIR_WRITE],
				eta_str);
		/* If truncation occurred, adjust l so p lands on the NUL */
		if (l >= left)
			l = left - 1;
		p += l;
		linelen = p - output;
		if (l >= 0 && linelen < linelen_last)
			p += sprintf(p, "%*s", linelen_last - linelen, "");
		linelen_last = linelen;

		for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
			free(rate_str[ddir]);
			free(iops_str[ddir]);
		}
	}
	p += sprintf(p, "\r");

	printf("%s", output);

	if (!eta_new_line_init) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_init = 1;
	} else if (eta_new_line && mtime_since_now(&disp_eta_new_line) > eta_new_line) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_pending = 1;
	}

	fflush(stdout);
}

struct jobs_eta *get_jobs_eta(bool force, size_t *size)
{
	struct jobs_eta *je;

	if (!thread_number)
		return NULL;

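	/* Fixed struct plus room for the condensed run string, with slack. */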
	*size = sizeof(*je) + THREAD_RUNSTR_SZ + 8;
	je = malloc(*size);
	if (!je)
		return NULL;
	memset(je, 0, *size);

	if (!calc_thread_status(je, force)) {
		free(je);
		return NULL;
	}

	*size = sizeof(*je) + strlen((char *) je->run_str) + 1;
	return je;
}

void print_thread_status(void)
{
	struct jobs_eta *je;
	size_t size;

	je = get_jobs_eta(false, &size);
	if (je)
		display_thread_status(je);

	free(je);
}

void print_status_init(int thr_number)
{
	DRD_IGNORE_VAR(__run_str);
	__run_str[thr_number] = 'P';
	update_condensed_str(__run_str, run_str);
}