/*
 * Status and ETA code
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>

#include "fio.h"
#include "lib/pow2.h"

static char __run_str[REAL_MAX_JOBS + 1];
static char run_str[__THREAD_RUNSTR_SZ(REAL_MAX_JOBS) + 1];

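/*
 * Condense the per-job state string by collapsing runs of identical
 * characters into "<char>(<count>)" groups, e.g. "RRW" becomes
 * "R(2),W(1)". This keeps the status line readable with large job counts.
 */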
static void update_condensed_str(char *rstr, char *run_str_condensed)
{
	if (*rstr) {
		while (*rstr) {
			int nr = 1;

			*run_str_condensed++ = *rstr++;
			while (*(rstr - 1) == *rstr) {
				rstr++;
				nr++;
			}
			run_str_condensed += sprintf(run_str_condensed, "(%u),", nr);
		}
		run_str_condensed--;
	}
	*run_str_condensed = '\0';
}

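/*
 * Legend for the per-job state characters below: 'P' setup but not
 * started, 'C' created, 'I' initialized/setting up, '/' ramp period,
 * 'R'/'W'/'D' sequential read/write/trim, 'r'/'w'/'d' their random
 * variants, 'M'/'m' mixed, 'p' pre-reading, 'V' verifying, 'F' fsyncing,
 * 'f' finishing, 'E' exited, '_' reaped, 'X' exited with error,
 * 'K' exited on signal.
 */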
/*
 * Sets the status of the 'td' in the printed status map.
 */
static void check_str_update(struct thread_data *td)
{
	char c = __run_str[td->thread_number - 1];

	switch (td->runstate) {
	case TD_REAPED:
		if (td->error)
			c = 'X';
		else if (td->sig)
			c = 'K';
		else
			c = '_';
		break;
	case TD_EXITED:
		c = 'E';
		break;
	case TD_RAMP:
		c = '/';
		break;
	case TD_RUNNING:
		if (td_rw(td)) {
			if (td_random(td)) {
				if (td->o.rwmix[DDIR_READ] == 100)
					c = 'r';
				else if (td->o.rwmix[DDIR_WRITE] == 100)
					c = 'w';
				else
					c = 'm';
			} else {
				if (td->o.rwmix[DDIR_READ] == 100)
					c = 'R';
				else if (td->o.rwmix[DDIR_WRITE] == 100)
					c = 'W';
				else
					c = 'M';
			}
		} else if (td_read(td)) {
			if (td_random(td))
				c = 'r';
			else
				c = 'R';
		} else if (td_write(td)) {
			if (td_random(td))
				c = 'w';
			else
				c = 'W';
		} else {
			if (td_random(td))
				c = 'd';
			else
				c = 'D';
		}
		break;
	case TD_PRE_READING:
		c = 'p';
		break;
	case TD_VERIFYING:
		c = 'V';
		break;
	case TD_FSYNCING:
		c = 'F';
		break;
	case TD_FINISHING:
		c = 'f';
		break;
	case TD_CREATED:
		c = 'C';
		break;
	case TD_INITIALIZED:
	case TD_SETTING_UP:
		c = 'I';
		break;
	case TD_NOT_CREATED:
		c = 'P';
		break;
	default:
		log_err("state %d\n", td->runstate);
	}

	__run_str[td->thread_number - 1] = c;
	update_condensed_str(__run_str, run_str);
}

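/*
 * Example: 93784 seconds formats as "01d:02h:03m:04s". The day and hour
 * fields are only emitted when needed (hours are forced once days are
 * shown, so the output never reads "01d:03m:04s"); an unknown ETA (-1)
 * prints as "--".
 */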
/*
 * Convert seconds to a printable string.
 */
void eta_to_str(char *str, unsigned long eta_sec)
{
	unsigned int d, h, m, s;
	int disp_hour = 0;

	if (eta_sec == -1) {
		sprintf(str, "--");
		return;
	}

	s = eta_sec % 60;
	eta_sec /= 60;
	m = eta_sec % 60;
	eta_sec /= 60;
	h = eta_sec % 24;
	eta_sec /= 24;
	d = eta_sec;

	if (d) {
		disp_hour = 1;
		str += sprintf(str, "%02ud:", d);
	}

	if (h || disp_hour)
		str += sprintf(str, "%02uh:", h);

	str += sprintf(str, "%02um:", m);
	str += sprintf(str, "%02us", s);
}

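/*
 * The running-job estimate below is a linear extrapolation: with a
 * fraction 'perc' of the work done after 'elapsed' seconds, the total
 * runtime is projected as elapsed / perc, so the remainder is
 * elapsed * (1.0 / perc) - elapsed.
 */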
/*
 * Best effort calculation of the estimated pending runtime of a job.
 */
static unsigned long thread_eta(struct thread_data *td)
{
	unsigned long long bytes_total, bytes_done;
	unsigned long eta_sec = 0;
	unsigned long elapsed;
	uint64_t timeout;

	elapsed = (mtime_since_now(&td->epoch) + 999) / 1000;
	timeout = td->o.timeout / 1000000UL;

	bytes_total = td->total_io_size;

	if (td->flags & TD_F_NO_PROGRESS)
		return -1;

	if (td->o.fill_device && td->o.size == -1ULL) {
		if (!td->fill_device_size || td->fill_device_size == -1ULL)
			return 0;

		bytes_total = td->fill_device_size;
	}

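	/*
	 * Zoned jobs skip zone_skip bytes after each zone_size chunk of
	 * I/O; the skipped bytes cost no I/O time, so deduct them from
	 * the total. nr_zones is a ceiling division by the zone stride.
	 */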
	if (td->o.zone_size && td->o.zone_skip && bytes_total) {
		unsigned int nr_zones;
		uint64_t zone_bytes;

		zone_bytes = bytes_total + td->o.zone_size + td->o.zone_skip;
		nr_zones = (zone_bytes - 1) / (td->o.zone_size + td->o.zone_skip);
		bytes_total -= nr_zones * td->o.zone_skip;
	}

	/*
	 * If writing and verifying afterwards, bytes_total will be twice the
	 * size. In a mixed workload, the verify phase will be the size of
	 * the first stage writes.
	 */
	if (td->o.do_verify && td->o.verify && td_write(td)) {
		if (td_rw(td)) {
			unsigned int perc = 50;

			if (td->o.rwmix[DDIR_WRITE])
				perc = td->o.rwmix[DDIR_WRITE];

			bytes_total += (bytes_total * perc) / 100;
		} else
			bytes_total <<= 1;
	}

	if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
		double perc, perc_t;

		bytes_done = ddir_rw_sum(td->io_bytes);

		if (bytes_total) {
			perc = (double) bytes_done / (double) bytes_total;
			if (perc > 1.0)
				perc = 1.0;
		} else
			perc = 0.0;

		if (td->o.time_based) {
			if (timeout) {
				perc_t = (double) elapsed / (double) timeout;
				if (perc_t < perc)
					perc = perc_t;
			} else {
				/*
				 * Will never be hit: we can't have time_based
				 * without a timeout set.
				 */
				perc = 0.0;
			}
		}

		if (perc == 0.0) {
			eta_sec = timeout;
		} else {
			eta_sec = (unsigned long) (elapsed * (1.0 / perc)) - elapsed;
		}

		if (td->o.timeout &&
		    eta_sec > (timeout + done_secs - elapsed))
			eta_sec = timeout + done_secs - elapsed;
	} else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
			|| td->runstate == TD_INITIALIZED
			|| td->runstate == TD_SETTING_UP
			|| td->runstate == TD_RAMP
			|| td->runstate == TD_PRE_READING) {
		int64_t t_eta = 0, r_eta = 0;
		unsigned long long rate_bytes;

		/*
		 * We can only guess - assume it'll run the full timeout
		 * if given, otherwise assume it'll run at the specified rate.
		 */
		if (td->o.timeout) {
			uint64_t __timeout = td->o.timeout;
			uint64_t start_delay = td->o.start_delay;
			uint64_t ramp_time = td->o.ramp_time;

			t_eta = __timeout + start_delay;
			if (!td->ramp_time_over) {
				t_eta += ramp_time;
			}
			t_eta /= 1000000ULL;

			if ((td->runstate == TD_RAMP) && in_ramp_time(td)) {
				unsigned long ramp_left;

				ramp_left = mtime_since_now(&td->epoch);
				ramp_left = (ramp_left + 999) / 1000;
				if (ramp_left <= t_eta)
					t_eta -= ramp_left;
			}
		}
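
		/*
		 * Rate-limited jobs: also estimate from the configured rate
		 * caps, then take whichever bound (time or rate) predicts
		 * the earlier finish.
		 */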
		rate_bytes = 0;
		if (td_read(td))
			rate_bytes = td->o.rate[DDIR_READ];
		if (td_write(td))
			rate_bytes += td->o.rate[DDIR_WRITE];
		if (td_trim(td))
			rate_bytes += td->o.rate[DDIR_TRIM];

		if (rate_bytes) {
			r_eta = bytes_total / rate_bytes;
			r_eta += (td->o.start_delay / 1000000ULL);
		}

		if (r_eta && t_eta)
			eta_sec = min(r_eta, t_eta);
		else if (r_eta)
			eta_sec = r_eta;
		else if (t_eta)
			eta_sec = t_eta;
		else
			eta_sec = 0;
	} else {
		/*
		 * thread is already done or waiting for fsync
		 */
		eta_sec = 0;
	}

	return eta_sec;
}

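/*
 * Bandwidth deltas are converted to KiB/s: e.g. 64 MiB moved in a
 * 2000 ms window gives (1000 * 67108864) / 2000 / 1024 = 32768 KiB/s.
 * With unified_rw_rep, all directions are folded into slot 0.
 */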
static void calc_rate(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_bytes,
		      unsigned long long *prev_io_bytes, uint64_t *rate)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff, this_rate;

		diff = io_bytes[i] - prev_io_bytes[i];
		if (mtime)
			this_rate = ((1000 * diff) / mtime) / 1024; /* KiB/s */
		else
			this_rate = 0;

		if (unified_rw_rep) {
			rate[i] = 0;
			rate[0] += this_rate;
		} else
			rate[i] = this_rate;

		prev_io_bytes[i] = io_bytes[i];
	}
}

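/*
 * Same windowed-delta scheme as calc_rate(), but on block counts:
 * IOPS = blocks completed in the window * 1000 / window length in ms.
 */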
static void calc_iops(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_iops,
		      unsigned long long *prev_io_iops, unsigned int *iops)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff, this_iops;

		diff = io_iops[i] - prev_io_iops[i];
		if (mtime)
			this_iops = (diff * 1000) / mtime;
		else
			this_iops = 0;

		if (unified_rw_rep) {
			iops[i] = 0;
			iops[0] += this_iops;
		} else
			iops[i] = this_iops;

		prev_io_iops[i] = io_iops[i];
	}
}

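/*
 * E.g. with a 1000 ms ETA interval (the usual default), intervals longer
 * than 950 ms are accepted, tolerating minor timer jitter.
 */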
/*
 * Allow a little slack - if we're within 95% of the time, allow ETA.
 */
bool eta_time_within_slack(unsigned int time)
{
	return time > ((eta_interval_msec * 95) / 100);
}

/*
 * Print status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
bool calc_thread_status(struct jobs_eta *je, int force)
{
	struct thread_data *td;
	int i, unified_rw_rep;
	uint64_t rate_time, disp_time, bw_avg_time, *eta_secs;
	unsigned long long io_bytes[DDIR_RWDIR_CNT];
	unsigned long long io_iops[DDIR_RWDIR_CNT];
	struct timespec now;

	static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_iops[DDIR_RWDIR_CNT];
	static struct timespec rate_prev_time, disp_prev_time;

	if (!force) {
		if (!(output_format & FIO_OUTPUT_NORMAL) &&
		    f_out == stdout)
			return false;
		if (temp_stall_ts || eta_print == FIO_ETA_NEVER)
			return false;

		if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))
			return false;
	}

	if (!ddir_rw_sum(rate_io_bytes))
		fill_start_time(&rate_prev_time);
	if (!ddir_rw_sum(disp_io_bytes))
		fill_start_time(&disp_prev_time);

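	/* one ETA slot per job; combined below into a single estimate */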
	eta_secs = malloc(thread_number * sizeof(uint64_t));
	memset(eta_secs, 0, thread_number * sizeof(uint64_t));

	je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;

	io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
	io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
	bw_avg_time = ULONG_MAX;
	unified_rw_rep = 0;
	for_each_td(td, i) {
		unified_rw_rep += td->o.unified_rw_rep;
		if (is_power_of_2(td->o.kb_base))
			je->is_pow2 = 1;
		je->unit_base = td->o.unit_base;
		if (td->o.bw_avg_time < bw_avg_time)
			bw_avg_time = td->o.bw_avg_time;
		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
		    || td->runstate == TD_FSYNCING
		    || td->runstate == TD_PRE_READING
		    || td->runstate == TD_FINISHING) {
			je->nr_running++;
			if (td_read(td)) {
				je->t_rate[0] += td->o.rate[DDIR_READ];
				je->t_iops[0] += td->o.rate_iops[DDIR_READ];
				je->m_rate[0] += td->o.ratemin[DDIR_READ];
				je->m_iops[0] += td->o.rate_iops_min[DDIR_READ];
			}
			if (td_write(td)) {
				je->t_rate[1] += td->o.rate[DDIR_WRITE];
				je->t_iops[1] += td->o.rate_iops[DDIR_WRITE];
				je->m_rate[1] += td->o.ratemin[DDIR_WRITE];
				je->m_iops[1] += td->o.rate_iops_min[DDIR_WRITE];
			}
			if (td_trim(td)) {
				je->t_rate[2] += td->o.rate[DDIR_TRIM];
				je->t_iops[2] += td->o.rate_iops[DDIR_TRIM];
				je->m_rate[2] += td->o.ratemin[DDIR_TRIM];
				je->m_iops[2] += td->o.rate_iops_min[DDIR_TRIM];
			}

			je->files_open += td->nr_open_files;
		} else if (td->runstate == TD_RAMP) {
			je->nr_running++;
			je->nr_ramp++;
		} else if (td->runstate == TD_SETTING_UP)
			je->nr_setting_up++;
		else if (td->runstate < TD_RUNNING)
			je->nr_pending++;

		if (je->elapsed_sec >= 3)
			eta_secs[i] = thread_eta(td);
		else
			eta_secs[i] = INT_MAX;

		check_str_update(td);

		if (td->runstate > TD_SETTING_UP) {
			int ddir;

			for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
				if (unified_rw_rep) {
					io_bytes[0] += td->io_bytes[ddir];
					io_iops[0] += td->io_blocks[ddir];
				} else {
					io_bytes[ddir] += td->io_bytes[ddir];
					io_iops[ddir] += td->io_blocks[ddir];
				}
			}
		}
	}

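	/*
	 * Combine the per-job ETAs: with exitall_on_terminate the whole run
	 * ends when the first job ends, so take the minimum. Otherwise take
	 * the maximum over concurrently running jobs and add the estimates
	 * of stonewalled jobs, which will run sequentially after them.
	 */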
	if (exitall_on_terminate) {
		je->eta_sec = INT_MAX;
		for_each_td(td, i) {
			if (eta_secs[i] < je->eta_sec)
				je->eta_sec = eta_secs[i];
		}
	} else {
		unsigned long eta_stone = 0;

		je->eta_sec = 0;
		for_each_td(td, i) {
			if ((td->runstate == TD_NOT_CREATED) && td->o.stonewall)
				eta_stone += eta_secs[i];
			else {
				if (eta_secs[i] > je->eta_sec)
					je->eta_sec = eta_secs[i];
			}
		}
		je->eta_sec += eta_stone;
	}

	free(eta_secs);

	fio_gettime(&now, NULL);
	rate_time = mtime_since(&rate_prev_time, &now);

	if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
		calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
			  je->rate);
		memcpy(&rate_prev_time, &now, sizeof(now));
		add_agg_sample(sample_val(je->rate[DDIR_READ]), DDIR_READ, 0);
		add_agg_sample(sample_val(je->rate[DDIR_WRITE]), DDIR_WRITE, 0);
		add_agg_sample(sample_val(je->rate[DDIR_TRIM]), DDIR_TRIM, 0);
	}

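	/*
	 * Skip this display update if it fired noticeably earlier than the
	 * configured ETA interval, unless the caller forced a refresh.
	 */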
	disp_time = mtime_since(&disp_prev_time, &now);

	if (!force && !eta_time_within_slack(disp_time))
		return false;

	calc_rate(unified_rw_rep, disp_time, io_bytes, disp_io_bytes, je->rate);
	calc_iops(unified_rw_rep, disp_time, io_iops, disp_io_iops, je->iops);

	memcpy(&disp_prev_time, &now, sizeof(now));

	if (!force && !je->nr_running && !je->nr_pending)
		return false;

	je->nr_threads = thread_number;
	update_condensed_str(__run_str, run_str);
	memcpy(je->run_str, run_str, strlen(run_str));
	return true;
}

void display_thread_status(struct jobs_eta *je)
{
	static struct timespec disp_eta_new_line;
	static int eta_new_line_init, eta_new_line_pending;
	static int linelen_last;
	static int eta_good;
	char output[__THREAD_RUNSTR_SZ(REAL_MAX_JOBS) + 512], *p = output;
	char eta_str[128];
	double perc = 0.0;

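	/* completion estimate: elapsed time as a fraction of projected total */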
	if (je->eta_sec != INT_MAX && je->elapsed_sec) {
		perc = (double) je->elapsed_sec / (double) (je->elapsed_sec + je->eta_sec);
		eta_to_str(eta_str, je->eta_sec);
	}

	if (eta_new_line_pending) {
		eta_new_line_pending = 0;
		linelen_last = 0;
		p += sprintf(p, "\n");
	}

	p += sprintf(p, "Jobs: %d (f=%d)", je->nr_running, je->files_open);

	/* rate limits, if any */
	if (je->m_rate[0] || je->m_rate[1] || je->m_rate[2] ||
	    je->t_rate[0] || je->t_rate[1] || je->t_rate[2]) {
		char *tr, *mr;

		mr = num2str(je->m_rate[0] + je->m_rate[1] + je->m_rate[2],
			     je->sig_figs, 0, je->is_pow2, N2S_BYTEPERSEC);
		tr = num2str(je->t_rate[0] + je->t_rate[1] + je->t_rate[2],
			     je->sig_figs, 0, je->is_pow2, N2S_BYTEPERSEC);

		p += sprintf(p, ", %s-%s", mr, tr);
		free(tr);
		free(mr);
	} else if (je->m_iops[0] || je->m_iops[1] || je->m_iops[2] ||
		   je->t_iops[0] || je->t_iops[1] || je->t_iops[2]) {
		p += sprintf(p, ", %d-%d IOPS",
			     je->m_iops[0] + je->m_iops[1] + je->m_iops[2],
			     je->t_iops[0] + je->t_iops[1] + je->t_iops[2]);
	}

	/* current run string, % done, bandwidth, iops, eta */
	if (je->eta_sec != INT_MAX && je->nr_running) {
		char perc_str[32];
		char *iops_str[DDIR_RWDIR_CNT];
		char *rate_str[DDIR_RWDIR_CNT];
		size_t left;
		int l;
		int ddir;
		int linelen;

		if ((!je->eta_sec && !eta_good) || je->nr_ramp == je->nr_running ||
		    je->eta_sec == -1)
			strcpy(perc_str, "-.-%");
		else {
			double mult = 100.0;

			if (je->nr_setting_up && je->nr_running)
				mult *= (1.0 - (double) je->nr_setting_up / (double) je->nr_running);

			eta_good = 1;
			perc *= mult;
			sprintf(perc_str, "%3.1f%%", perc);
		}

		for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
			rate_str[ddir] = num2str(je->rate[ddir], 4,
						 1024, je->is_pow2, je->unit_base);
			iops_str[ddir] = num2str(je->iops[ddir], 4, 1, 0, N2S_NONE);
		}

		left = sizeof(output) - (p - output) - 1;

		if (je->rate[DDIR_TRIM] || je->iops[DDIR_TRIM])
			l = snprintf(p, left,
				": [%s][%s][r=%s,w=%s,t=%s][r=%s,w=%s,t=%s IOPS][eta %s]",
				je->run_str, perc_str, rate_str[DDIR_READ],
				rate_str[DDIR_WRITE], rate_str[DDIR_TRIM],
				iops_str[DDIR_READ], iops_str[DDIR_WRITE],
				iops_str[DDIR_TRIM], eta_str);
		else
			l = snprintf(p, left,
				": [%s][%s][r=%s,w=%s][r=%s,w=%s IOPS][eta %s]",
				je->run_str, perc_str,
				rate_str[DDIR_READ], rate_str[DDIR_WRITE],
				iops_str[DDIR_READ], iops_str[DDIR_WRITE],
				eta_str);
		/* If truncation occurred, adjust l so p lands on the NUL */
		if (l >= left)
			l = left - 1;
		p += l;
		linelen = p - output;
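		/*
		 * The line is redrawn in place with '\r'; if it got shorter,
		 * pad with spaces to erase the tail of the previous line.
		 */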
		if (l >= 0 && linelen < linelen_last)
			p += sprintf(p, "%*s", linelen_last - linelen, "");
		linelen_last = linelen;

		for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
			free(rate_str[ddir]);
			free(iops_str[ddir]);
		}
	}
	p += sprintf(p, "\r");

	printf("%s", output);

	if (!eta_new_line_init) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_init = 1;
	} else if (eta_new_line && mtime_since_now(&disp_eta_new_line) > eta_new_line) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_pending = 1;
	}

	fflush(stdout);
}

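/*
 * Allocate for the worst-case condensed run string, then shrink the
 * reported size to what the string actually used, so callers only copy
 * the live portion.
 */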
struct jobs_eta *get_jobs_eta(bool force, size_t *size)
{
	struct jobs_eta *je;

	if (!thread_number)
		return NULL;

	*size = sizeof(*je) + THREAD_RUNSTR_SZ + 8;
	je = malloc(*size);
	if (!je)
		return NULL;
	memset(je, 0, *size);

	if (!calc_thread_status(je, force)) {
		free(je);
		return NULL;
	}

	*size = sizeof(*je) + strlen((char *) je->run_str) + 1;
	return je;
}

void print_thread_status(void)
{
	struct jobs_eta *je;
	size_t size;

	je = get_jobs_eta(false, &size);
	if (je)
		display_thread_status(je);

	free(je);
}

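/*
 * Presumably invoked as each job is added: mark its slot 'P' (not yet
 * started) and refresh the condensed string.
 */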
void print_status_init(int thr_number)
{
	__run_str[thr_number] = 'P';
	update_condensed_str(__run_str, run_str);
}