Fix two minor typos
[fio.git] / eta.c
/*
 * Status and ETA code
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>

#include "fio.h"

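/*
 * One status character per job, indexed by thread_number - 1 and kept up
 * to date by check_str_update() as jobs change state.
 */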
static char __run_str[REAL_MAX_JOBS + 1];

/*
 * Worst-case condensing would be 1:4, so allow enough room for that
 */
static char run_str[(4 * REAL_MAX_JOBS) + 1];

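/*
 * Run-length condense the per-job status string: each run of identical
 * state characters is emitted as the character followed by its count,
 * so "RRRRW" becomes "R(4),W(1)".
 */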
static void update_condensed_str(char *run_str, char *run_str_condensed)
{
	int i, ci, last, nr;
	size_t len;

	len = strlen(run_str);
	if (!len)
		return;

	last = 0;
	nr = 0;
	ci = 0;
	for (i = 0; i < len; i++) {
		if (!last) {
new:
			run_str_condensed[ci] = run_str[i];
			last = run_str[i];
			nr = 1;
			ci++;
		} else if (last == run_str[i]) {
			nr++;
		} else {
			ci += sprintf(&run_str_condensed[ci], "(%u),", nr);
			goto new;
		}
	}

	if (nr)
		ci += sprintf(&run_str_condensed[ci], "(%u)", nr);

	run_str_condensed[ci] = '\0';
}

/*
 * Sets the status of the 'td' in the printed status map.
 */
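/*
 * Character legend: P=not created, C=created, I=initialized/setting up,
 * p=pre-reading, /=ramp time, R/W/M=sequential read/write/mixed,
 * r/w/m=random read/write/mixed, D/d=sequential/random trim, V=verifying,
 * F=fsyncing, f=finishing, E=exited, X=exited with error, K=killed by
 * signal, _=reaped.
 */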
static void check_str_update(struct thread_data *td)
{
	char c = __run_str[td->thread_number - 1];

	switch (td->runstate) {
	case TD_REAPED:
		if (td->error)
			c = 'X';
		else if (td->sig)
			c = 'K';
		else
			c = '_';
		break;
	case TD_EXITED:
		c = 'E';
		break;
	case TD_RAMP:
		c = '/';
		break;
	case TD_RUNNING:
		if (td_rw(td)) {
			if (td_random(td)) {
				if (td->o.rwmix[DDIR_READ] == 100)
					c = 'r';
				else if (td->o.rwmix[DDIR_WRITE] == 100)
					c = 'w';
				else
					c = 'm';
			} else {
				if (td->o.rwmix[DDIR_READ] == 100)
					c = 'R';
				else if (td->o.rwmix[DDIR_WRITE] == 100)
					c = 'W';
				else
					c = 'M';
			}
		} else if (td_read(td)) {
			if (td_random(td))
				c = 'r';
			else
				c = 'R';
		} else if (td_write(td)) {
			if (td_random(td))
				c = 'w';
			else
				c = 'W';
		} else {
			if (td_random(td))
				c = 'd';
			else
				c = 'D';
		}
		break;
	case TD_PRE_READING:
		c = 'p';
		break;
	case TD_VERIFYING:
		c = 'V';
		break;
	case TD_FSYNCING:
		c = 'F';
		break;
	case TD_FINISHING:
		c = 'f';
		break;
	case TD_CREATED:
		c = 'C';
		break;
	case TD_INITIALIZED:
	case TD_SETTING_UP:
		c = 'I';
		break;
	case TD_NOT_CREATED:
		c = 'P';
		break;
	default:
		log_err("state %d\n", td->runstate);
	}

	__run_str[td->thread_number - 1] = c;
	update_condensed_str(__run_str, run_str);
}

/*
 * Convert seconds to a printable string.
 */
void eta_to_str(char *str, unsigned long eta_sec)
{
	unsigned int d, h, m, s;
	int disp_hour = 0;

	s = eta_sec % 60;
	eta_sec /= 60;
	m = eta_sec % 60;
	eta_sec /= 60;
	h = eta_sec % 24;
	eta_sec /= 24;
	d = eta_sec;

	if (d) {
		disp_hour = 1;
		str += sprintf(str, "%02ud:", d);
	}

	if (h || disp_hour)
		str += sprintf(str, "%02uh:", h);

	str += sprintf(str, "%02um:", m);
	str += sprintf(str, "%02us", s);
}

/*
 * Best effort calculation of the estimated pending runtime of a job.
 */
static int thread_eta(struct thread_data *td)
{
	unsigned long long bytes_total, bytes_done;
	unsigned long eta_sec = 0;
	unsigned long elapsed;
	uint64_t timeout;

	elapsed = (mtime_since_now(&td->epoch) + 999) / 1000;
	timeout = td->o.timeout / 1000000UL;

	bytes_total = td->total_io_size;

	if (td->o.fill_device && td->o.size == -1ULL) {
		if (!td->fill_device_size || td->fill_device_size == -1ULL)
			return 0;

		bytes_total = td->fill_device_size;
	}

	if (td->o.zone_size && td->o.zone_skip && bytes_total) {
		unsigned int nr_zones;
		uint64_t zone_bytes;

		zone_bytes = bytes_total + td->o.zone_size + td->o.zone_skip;
		nr_zones = (zone_bytes - 1) / (td->o.zone_size + td->o.zone_skip);
		bytes_total -= nr_zones * td->o.zone_skip;
	}

	/*
	 * If writing and verifying afterwards, bytes_total will be twice
	 * the size. In a mixed workload, the verify phase will be the size
	 * of the first-stage writes.
	 */
	if (td->o.do_verify && td->o.verify && td_write(td)) {
		if (td_rw(td)) {
			unsigned int perc = 50;

			if (td->o.rwmix[DDIR_WRITE])
				perc = td->o.rwmix[DDIR_WRITE];

			bytes_total += (bytes_total * perc) / 100;
		} else
			bytes_total <<= 1;
	}

	if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
		double perc, perc_t;

		bytes_done = ddir_rw_sum(td->io_bytes);

		if (bytes_total) {
			perc = (double) bytes_done / (double) bytes_total;
			if (perc > 1.0)
				perc = 1.0;
		} else
			perc = 0.0;

		if (td->o.time_based) {
			if (timeout) {
				perc_t = (double) elapsed / (double) timeout;
				if (perc_t < perc)
					perc = perc_t;
			} else {
				/*
				 * Will never hit; we can't have time_based
				 * without a timeout set.
				 */
				perc = 0.0;
			}
		}

		eta_sec = (unsigned long) (elapsed * (1.0 / perc)) - elapsed;

		if (td->o.timeout &&
		    eta_sec > (timeout + done_secs - elapsed))
			eta_sec = timeout + done_secs - elapsed;
	} else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
			|| td->runstate == TD_INITIALIZED
			|| td->runstate == TD_SETTING_UP
			|| td->runstate == TD_RAMP
			|| td->runstate == TD_PRE_READING) {
		int t_eta = 0, r_eta = 0;
		unsigned long long rate_bytes;

		/*
		 * We can only guess - assume it'll run the full timeout
		 * if given, otherwise assume it'll run at the specified rate.
		 */
		if (td->o.timeout) {
			uint64_t timeout = td->o.timeout;
			uint64_t start_delay = td->o.start_delay;
			uint64_t ramp_time = td->o.ramp_time;

			t_eta = timeout + start_delay + ramp_time;
			t_eta /= 1000000ULL;

			if (in_ramp_time(td)) {
				unsigned long ramp_left;

				ramp_left = mtime_since_now(&td->epoch);
				ramp_left = (ramp_left + 999) / 1000;
				if (ramp_left <= t_eta)
					t_eta -= ramp_left;
			}
		}
		rate_bytes = ddir_rw_sum(td->o.rate);
		if (rate_bytes) {
			r_eta = (bytes_total / 1024) / rate_bytes;
			r_eta += (td->o.start_delay / 1000000ULL);
		}

		if (r_eta && t_eta)
			eta_sec = min(r_eta, t_eta);
		else if (r_eta)
			eta_sec = r_eta;
		else if (t_eta)
			eta_sec = t_eta;
		else
			eta_sec = 0;
	} else {
		/*
		 * thread is already done or waiting for fsync
		 */
		eta_sec = 0;
	}

	return eta_sec;
}

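/*
 * Compute the per-direction bandwidth, in KB/sec, over the mtime msecs
 * since the previous sample, and remember the current byte counts for the
 * next call. With unified_rw_rep, everything is summed into the read slot.
 */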
static void calc_rate(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_bytes,
		      unsigned long long *prev_io_bytes, unsigned int *rate)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff;

		diff = io_bytes[i] - prev_io_bytes[i];
		if (unified_rw_rep) {
			rate[i] = 0;
			rate[0] += ((1000 * diff) / mtime) / 1024;
		} else
			rate[i] = ((1000 * diff) / mtime) / 1024;

		prev_io_bytes[i] = io_bytes[i];
	}
}

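/*
 * Like calc_rate(), but for IOPS: completed blocks per second for each
 * data direction since the previous sample.
 */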
static void calc_iops(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_iops,
		      unsigned long long *prev_io_iops, unsigned int *iops)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff;

		diff = io_iops[i] - prev_io_iops[i];
		if (unified_rw_rep) {
			iops[i] = 0;
			iops[0] += (diff * 1000) / mtime;
		} else
			iops[i] = (diff * 1000) / mtime;

		prev_io_iops[i] = io_iops[i];
	}
}

/*
 * Print status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
int calc_thread_status(struct jobs_eta *je, int force)
{
	struct thread_data *td;
	int i, unified_rw_rep;
	unsigned long rate_time, disp_time, bw_avg_time, *eta_secs;
	unsigned long long io_bytes[DDIR_RWDIR_CNT];
	unsigned long long io_iops[DDIR_RWDIR_CNT];
	struct timeval now;

	static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_iops[DDIR_RWDIR_CNT];
	static struct timeval rate_prev_time, disp_prev_time;

	if (!force) {
		if (output_format != FIO_OUTPUT_NORMAL &&
		    f_out == stdout)
			return 0;
		if (temp_stall_ts || eta_print == FIO_ETA_NEVER)
			return 0;

		if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))
			return 0;
	}

	if (!ddir_rw_sum(rate_io_bytes))
		fill_start_time(&rate_prev_time);
	if (!ddir_rw_sum(disp_io_bytes))
		fill_start_time(&disp_prev_time);

	eta_secs = malloc(thread_number * sizeof(unsigned long));
	memset(eta_secs, 0, thread_number * sizeof(unsigned long));

	je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;

	io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
	io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
	bw_avg_time = ULONG_MAX;
	unified_rw_rep = 0;
	for_each_td(td, i) {
		unified_rw_rep += td->o.unified_rw_rep;
		if (is_power_of_2(td->o.kb_base))
			je->is_pow2 = 1;
		je->unit_base = td->o.unit_base;
		if (td->o.bw_avg_time < bw_avg_time)
			bw_avg_time = td->o.bw_avg_time;
		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
		    || td->runstate == TD_FSYNCING
		    || td->runstate == TD_PRE_READING
		    || td->runstate == TD_FINISHING) {
			je->nr_running++;
			if (td_read(td)) {
				je->t_rate[0] += td->o.rate[DDIR_READ];
				je->t_iops[0] += td->o.rate_iops[DDIR_READ];
				je->m_rate[0] += td->o.ratemin[DDIR_READ];
				je->m_iops[0] += td->o.rate_iops_min[DDIR_READ];
			}
			if (td_write(td)) {
				je->t_rate[1] += td->o.rate[DDIR_WRITE];
				je->t_iops[1] += td->o.rate_iops[DDIR_WRITE];
				je->m_rate[1] += td->o.ratemin[DDIR_WRITE];
				je->m_iops[1] += td->o.rate_iops_min[DDIR_WRITE];
			}
			if (td_trim(td)) {
				je->t_rate[2] += td->o.rate[DDIR_TRIM];
				je->t_iops[2] += td->o.rate_iops[DDIR_TRIM];
				je->m_rate[2] += td->o.ratemin[DDIR_TRIM];
				je->m_iops[2] += td->o.rate_iops_min[DDIR_TRIM];
			}

			je->files_open += td->nr_open_files;
		} else if (td->runstate == TD_RAMP) {
			je->nr_running++;
			je->nr_ramp++;
		} else if (td->runstate == TD_SETTING_UP) {
			je->nr_running++;
			je->nr_setting_up++;
		} else if (td->runstate < TD_RUNNING)
			je->nr_pending++;

		if (je->elapsed_sec >= 3)
			eta_secs[i] = thread_eta(td);
		else
			eta_secs[i] = INT_MAX;

		check_str_update(td);

		if (td->runstate > TD_SETTING_UP) {
			int ddir;

			for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
				if (unified_rw_rep) {
					io_bytes[0] += td->io_bytes[ddir];
					io_iops[0] += td->io_blocks[ddir];
				} else {
					io_bytes[ddir] += td->io_bytes[ddir];
					io_iops[ddir] += td->io_blocks[ddir];
				}
			}
		}
	}

	if (exitall_on_terminate)
		je->eta_sec = INT_MAX;
	else
		je->eta_sec = 0;

	for_each_td(td, i) {
		if (exitall_on_terminate) {
			if (eta_secs[i] < je->eta_sec)
				je->eta_sec = eta_secs[i];
		} else {
			if (eta_secs[i] > je->eta_sec)
				je->eta_sec = eta_secs[i];
		}
	}

	free(eta_secs);

	fio_gettime(&now, NULL);
	rate_time = mtime_since(&rate_prev_time, &now);

	if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
		calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
				je->rate);
		memcpy(&rate_prev_time, &now, sizeof(now));
		add_agg_sample(je->rate[DDIR_READ], DDIR_READ, 0);
		add_agg_sample(je->rate[DDIR_WRITE], DDIR_WRITE, 0);
		add_agg_sample(je->rate[DDIR_TRIM], DDIR_TRIM, 0);
	}

	disp_time = mtime_since(&disp_prev_time, &now);

	/*
	 * Allow a little slack; the target is to print it every 1000 msecs
	 */
	if (!force && disp_time < 900)
		return 0;

	calc_rate(unified_rw_rep, disp_time, io_bytes, disp_io_bytes, je->rate);
	calc_iops(unified_rw_rep, disp_time, io_iops, disp_io_iops, je->iops);

	memcpy(&disp_prev_time, &now, sizeof(now));

	if (!force && !je->nr_running && !je->nr_pending)
		return 0;

	je->nr_threads = thread_number;
	update_condensed_str(__run_str, run_str);
	memcpy(je->run_str, run_str, strlen(run_str));
	return 1;
}

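/*
 * Render one console status line from a filled-in jobs_eta: job and file
 * counts, any rate or iops limits that are set, the condensed per-job state
 * string, percent done, current bandwidth and IOPS, and the ETA. The line
 * ends with '\r' so the next update overwrites it in place, unless
 * eta_new_line forces a line break.
 */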
void display_thread_status(struct jobs_eta *je)
{
	static struct timeval disp_eta_new_line;
	static int eta_new_line_init, eta_new_line_pending;
	static int linelen_last;
	static int eta_good;
	char output[REAL_MAX_JOBS + 512], *p = output;
	char eta_str[128];
	double perc = 0.0;

	if (je->eta_sec != INT_MAX && je->elapsed_sec) {
		perc = (double) je->elapsed_sec / (double) (je->elapsed_sec + je->eta_sec);
		eta_to_str(eta_str, je->eta_sec);
	}

	if (eta_new_line_pending) {
		eta_new_line_pending = 0;
		p += sprintf(p, "\n");
	}

	p += sprintf(p, "Jobs: %d (f=%d)", je->nr_running, je->files_open);
	if (je->m_rate[0] || je->m_rate[1] || je->t_rate[0] || je->t_rate[1]) {
		char *tr, *mr;

		mr = num2str(je->m_rate[0] + je->m_rate[1], 4, 0, je->is_pow2, 8);
		tr = num2str(je->t_rate[0] + je->t_rate[1], 4, 0, je->is_pow2, 8);
		p += sprintf(p, ", CR=%s/%s KB/s", tr, mr);
		free(tr);
		free(mr);
	} else if (je->m_iops[0] || je->m_iops[1] || je->t_iops[0] || je->t_iops[1]) {
		p += sprintf(p, ", CR=%d/%d IOPS",
				je->t_iops[0] + je->t_iops[1],
				je->m_iops[0] + je->m_iops[1]);
	}
	if (je->eta_sec != INT_MAX && je->nr_running) {
		char perc_str[32];
		char *iops_str[DDIR_RWDIR_CNT];
		char *rate_str[DDIR_RWDIR_CNT];
		size_t left;
		int l;
		int ddir;

		if ((!je->eta_sec && !eta_good) || je->nr_ramp == je->nr_running)
			strcpy(perc_str, "-.-% done");
		else {
			double mult = 100.0;

			if (je->nr_setting_up && je->nr_running)
				mult *= (1.0 - (double) je->nr_setting_up / (double) je->nr_running);

			eta_good = 1;
			perc *= mult;
			sprintf(perc_str, "%3.1f%% done", perc);
		}

		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
			rate_str[ddir] = num2str(je->rate[ddir], 5,
						1024, je->is_pow2, je->unit_base);
			iops_str[ddir] = num2str(je->iops[ddir], 4, 1, 0, 0);
		}

		left = sizeof(output) - (p - output) - 1;

		l = snprintf(p, left, ": [%s] [%s] [%s/%s/%s /s] [%s/%s/%s iops] [eta %s]",
				je->run_str, perc_str, rate_str[DDIR_READ],
				rate_str[DDIR_WRITE], rate_str[DDIR_TRIM],
				iops_str[DDIR_READ], iops_str[DDIR_WRITE],
				iops_str[DDIR_TRIM], eta_str);
		p += l;
		if (l >= 0 && l < linelen_last)
			p += sprintf(p, "%*s", linelen_last - l, "");
		linelen_last = l;

		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
			free(rate_str[ddir]);
			free(iops_str[ddir]);
		}
	}
	p += sprintf(p, "\r");

	printf("%s", output);

	if (!eta_new_line_init) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_init = 1;
	} else if (eta_new_line &&
		   mtime_since_now(&disp_eta_new_line) > eta_new_line * 1000) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_pending = 1;
	}

	fflush(stdout);
}

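/*
 * Allocate a jobs_eta, have calc_thread_status() fill it in, and display
 * it if there is anything worth showing.
 */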
void print_thread_status(void)
{
	struct jobs_eta *je;
	size_t size;

	if (!thread_number)
		return;

	size = sizeof(*je) + thread_number * sizeof(char) + 1;
	je = malloc(size);
	memset(je, 0, size);

	if (calc_thread_status(je, 0))
		display_thread_status(je);

	free(je);
}

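/*
 * Mark job 'thr_number' as not yet started ('P') in the status map and
 * refresh the condensed run string.
 */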
void print_status_init(int thr_number)
{
	__run_str[thr_number] = 'P';
	update_condensed_str(__run_str, run_str);
}