Add support for options being a power-of-2
[fio.git] / eta.c
/*
 * Status and ETA code
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>

#include "fio.h"
#include "lib/pow2.h"

static char __run_str[REAL_MAX_JOBS + 1];
static char run_str[__THREAD_RUNSTR_SZ(REAL_MAX_JOBS)];

static void update_condensed_str(char *rstr, char *run_str_condensed)
{
	if (*rstr) {
		while (*rstr) {
			int nr = 1;

			*run_str_condensed++ = *rstr++;
			while (*(rstr - 1) == *rstr) {
				rstr++;
				nr++;
			}
			run_str_condensed += sprintf(run_str_condensed, "(%u),", nr);
		}
		run_str_condensed--;
	}
	*run_str_condensed = '\0';
}
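
/*
 * Illustrative note (not part of the upstream file): update_condensed_str()
 * collapses each run of identical state characters into the character plus
 * a repeat count, so a raw run string such as
 *
 *	"PPPRR"
 *
 * comes out as "P(3),R(2)", with the trailing comma stripped. A single
 * character still gets a count, i.e. "P" becomes "P(1)".
 */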

/*
 * Sets the status of the 'td' in the printed status map.
 */
static void check_str_update(struct thread_data *td)
{
	char c = __run_str[td->thread_number - 1];

	switch (td->runstate) {
	case TD_REAPED:
		if (td->error)
			c = 'X';
		else if (td->sig)
			c = 'K';
		else
			c = '_';
		break;
	case TD_EXITED:
		c = 'E';
		break;
	case TD_RAMP:
		c = '/';
		break;
	case TD_RUNNING:
		if (td_rw(td)) {
			if (td_random(td)) {
				if (td->o.rwmix[DDIR_READ] == 100)
					c = 'r';
				else if (td->o.rwmix[DDIR_WRITE] == 100)
					c = 'w';
				else
					c = 'm';
			} else {
				if (td->o.rwmix[DDIR_READ] == 100)
					c = 'R';
				else if (td->o.rwmix[DDIR_WRITE] == 100)
					c = 'W';
				else
					c = 'M';
			}
		} else if (td_read(td)) {
			if (td_random(td))
				c = 'r';
			else
				c = 'R';
		} else if (td_write(td)) {
			if (td_random(td))
				c = 'w';
			else
				c = 'W';
		} else {
			if (td_random(td))
				c = 'd';
			else
				c = 'D';
		}
		break;
	case TD_PRE_READING:
		c = 'p';
		break;
	case TD_VERIFYING:
		c = 'V';
		break;
	case TD_FSYNCING:
		c = 'F';
		break;
	case TD_FINISHING:
		c = 'f';
		break;
	case TD_CREATED:
		c = 'C';
		break;
	case TD_INITIALIZED:
	case TD_SETTING_UP:
		c = 'I';
		break;
	case TD_NOT_CREATED:
		c = 'P';
		break;
	default:
		log_err("state %d\n", td->runstate);
	}

	__run_str[td->thread_number - 1] = c;
	update_condensed_str(__run_str, run_str);
}
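
/*
 * Quick legend for the characters assigned above (summarized here for
 * convenience; the switch in check_str_update() is authoritative):
 * 'P' not yet started, 'C' created, 'I' initialized/setting up,
 * '/' ramp period, 'p' pre-reading, 'R'/'W'/'D' sequential
 * read/write/trim, 'r'/'w'/'d' their random counterparts, 'M'/'m' mixed
 * sequential/random, 'V' verifying, 'F' fsyncing, 'f' finishing,
 * 'E' exited, '_' reaped, 'X' exited with error, 'K' killed by a signal.
 */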

/*
 * Convert seconds to a printable string.
 */
void eta_to_str(char *str, unsigned long eta_sec)
{
	unsigned int d, h, m, s;
	int disp_hour = 0;

	s = eta_sec % 60;
	eta_sec /= 60;
	m = eta_sec % 60;
	eta_sec /= 60;
	h = eta_sec % 24;
	eta_sec /= 24;
	d = eta_sec;

	if (d) {
		disp_hour = 1;
		str += sprintf(str, "%02ud:", d);
	}

	if (h || disp_hour)
		str += sprintf(str, "%02uh:", h);

	str += sprintf(str, "%02um:", m);
	str += sprintf(str, "%02us", s);
}
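
/*
 * Example (illustrative, assuming a sufficiently large destination
 * buffer): eta_to_str(buf, 90061) yields "01d:01h:01m:01s", while
 * eta_to_str(buf, 75) yields "01m:15s" - the day and hour fields are
 * only emitted when needed.
 */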

/*
 * Best effort calculation of the estimated pending runtime of a job.
 */
static int thread_eta(struct thread_data *td)
{
	unsigned long long bytes_total, bytes_done;
	unsigned long eta_sec = 0;
	unsigned long elapsed;
	uint64_t timeout;

	elapsed = (mtime_since_now(&td->epoch) + 999) / 1000;
	timeout = td->o.timeout / 1000000UL;

	bytes_total = td->total_io_size;

	if (td->o.fill_device && td->o.size == -1ULL) {
		if (!td->fill_device_size || td->fill_device_size == -1ULL)
			return 0;

		bytes_total = td->fill_device_size;
	}

	if (td->o.zone_size && td->o.zone_skip && bytes_total) {
		unsigned int nr_zones;
		uint64_t zone_bytes;

		zone_bytes = bytes_total + td->o.zone_size + td->o.zone_skip;
		nr_zones = (zone_bytes - 1) / (td->o.zone_size + td->o.zone_skip);
		bytes_total -= nr_zones * td->o.zone_skip;
	}

	/*
	 * If we're writing and verifying afterwards, bytes_total will be
	 * twice the size. In a mixed workload, the verify phase will be
	 * the size of the first-stage writes.
	 */
	if (td->o.do_verify && td->o.verify && td_write(td)) {
		if (td_rw(td)) {
			unsigned int perc = 50;

			if (td->o.rwmix[DDIR_WRITE])
				perc = td->o.rwmix[DDIR_WRITE];

			bytes_total += (bytes_total * perc) / 100;
		} else
			bytes_total <<= 1;
	}

	if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
		double perc, perc_t;

		bytes_done = ddir_rw_sum(td->io_bytes);

		if (bytes_total) {
			perc = (double) bytes_done / (double) bytes_total;
			if (perc > 1.0)
				perc = 1.0;
		} else
			perc = 0.0;

		if (td->o.time_based) {
			if (timeout) {
				perc_t = (double) elapsed / (double) timeout;
				if (perc_t < perc)
					perc = perc_t;
			} else {
				/*
				 * Will never hit; we can't have time_based
				 * without a timeout set.
				 */
				perc = 0.0;
			}
		}

		eta_sec = (unsigned long) (elapsed * (1.0 / perc)) - elapsed;

		if (td->o.timeout &&
		    eta_sec > (timeout + done_secs - elapsed))
			eta_sec = timeout + done_secs - elapsed;
	} else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
			|| td->runstate == TD_INITIALIZED
			|| td->runstate == TD_SETTING_UP
			|| td->runstate == TD_RAMP
			|| td->runstate == TD_PRE_READING) {
		int t_eta = 0, r_eta = 0;
		unsigned long long rate_bytes;

		/*
		 * We can only guess - assume it'll run the full timeout
		 * if given, otherwise assume it'll run at the specified rate.
		 */
		if (td->o.timeout) {
			uint64_t __timeout = td->o.timeout;
			uint64_t start_delay = td->o.start_delay;
			uint64_t ramp_time = td->o.ramp_time;

			t_eta = __timeout + start_delay + ramp_time;
			t_eta /= 1000000ULL;

			if (in_ramp_time(td)) {
				unsigned long ramp_left;

				ramp_left = mtime_since_now(&td->epoch);
				ramp_left = (ramp_left + 999) / 1000;
				if (ramp_left <= t_eta)
					t_eta -= ramp_left;
			}
		}
		rate_bytes = ddir_rw_sum(td->o.rate);
		if (rate_bytes) {
			r_eta = (bytes_total / 1024) / rate_bytes;
			r_eta += (td->o.start_delay / 1000000ULL);
		}

		if (r_eta && t_eta)
			eta_sec = min(r_eta, t_eta);
		else if (r_eta)
			eta_sec = r_eta;
		else if (t_eta)
			eta_sec = t_eta;
		else
			eta_sec = 0;
	} else {
		/*
		 * thread is already done or waiting for fsync
		 */
		eta_sec = 0;
	}

	return eta_sec;
}
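
/*
 * Sketch of the estimate above (using thread_eta()'s local variables,
 * illustrative numbers only):
 *
 *	perc    = bytes_done / bytes_total	(capped at 1.0)
 *	eta_sec = elapsed / perc - elapsed
 *
 * e.g. 25% done after 30 seconds gives 30 / 0.25 - 30 = 90 seconds left.
 * For time_based jobs, perc is replaced by elapsed/timeout when that is
 * smaller, and the result is clamped against the configured timeout.
 */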

static void calc_rate(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_bytes,
		      unsigned long long *prev_io_bytes, unsigned int *rate)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff;

		diff = io_bytes[i] - prev_io_bytes[i];
		if (unified_rw_rep) {
			rate[i] = 0;
			rate[0] += ((1000 * diff) / mtime) / 1024;
		} else
			rate[i] = ((1000 * diff) / mtime) / 1024;

		prev_io_bytes[i] = io_bytes[i];
	}
}
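
/*
 * Rate math, with made-up numbers for illustration: an mtime of 1000
 * msecs and a diff of 52428800 bytes gives
 *
 *	((1000 * 52428800) / 1000) / 1024 = 51200 KB/s
 *
 * With unified_rw_rep set, all data directions are folded into rate[0].
 */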

static void calc_iops(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_iops,
		      unsigned long long *prev_io_iops, unsigned int *iops)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff;

		diff = io_iops[i] - prev_io_iops[i];
		if (unified_rw_rep) {
			iops[i] = 0;
			iops[0] += (diff * 1000) / mtime;
		} else
			iops[i] = (diff * 1000) / mtime;

		prev_io_iops[i] = io_iops[i];
	}
}
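
/*
 * Same idea for IOPS, again with made-up numbers: 4500 blocks completed
 * over a 900 msec window gives (4500 * 1000) / 900 = 5000 IOPS. Both
 * helpers divide by mtime, so callers must pass a nonzero interval.
 */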

/*
 * Print status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
int calc_thread_status(struct jobs_eta *je, int force)
{
	struct thread_data *td;
	int i, unified_rw_rep;
	unsigned long rate_time, disp_time, bw_avg_time, *eta_secs;
	unsigned long long io_bytes[DDIR_RWDIR_CNT];
	unsigned long long io_iops[DDIR_RWDIR_CNT];
	struct timeval now;

	static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_iops[DDIR_RWDIR_CNT];
	static struct timeval rate_prev_time, disp_prev_time;

	if (!force) {
		if (output_format != FIO_OUTPUT_NORMAL &&
		    f_out == stdout)
			return 0;
		if (temp_stall_ts || eta_print == FIO_ETA_NEVER)
			return 0;

		if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))
			return 0;
	}

	if (!ddir_rw_sum(rate_io_bytes))
		fill_start_time(&rate_prev_time);
	if (!ddir_rw_sum(disp_io_bytes))
		fill_start_time(&disp_prev_time);

	eta_secs = malloc(thread_number * sizeof(unsigned long));
	memset(eta_secs, 0, thread_number * sizeof(unsigned long));

	je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;

	io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
	io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
	bw_avg_time = ULONG_MAX;
	unified_rw_rep = 0;
	for_each_td(td, i) {
		unified_rw_rep += td->o.unified_rw_rep;
		if (is_power_of_2(td->o.kb_base))
			je->is_pow2 = 1;
		je->unit_base = td->o.unit_base;
		if (td->o.bw_avg_time < bw_avg_time)
			bw_avg_time = td->o.bw_avg_time;
		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
		    || td->runstate == TD_FSYNCING
		    || td->runstate == TD_PRE_READING
		    || td->runstate == TD_FINISHING) {
			je->nr_running++;
			if (td_read(td)) {
				je->t_rate[0] += td->o.rate[DDIR_READ];
				je->t_iops[0] += td->o.rate_iops[DDIR_READ];
				je->m_rate[0] += td->o.ratemin[DDIR_READ];
				je->m_iops[0] += td->o.rate_iops_min[DDIR_READ];
			}
			if (td_write(td)) {
				je->t_rate[1] += td->o.rate[DDIR_WRITE];
				je->t_iops[1] += td->o.rate_iops[DDIR_WRITE];
				je->m_rate[1] += td->o.ratemin[DDIR_WRITE];
				je->m_iops[1] += td->o.rate_iops_min[DDIR_WRITE];
			}
			if (td_trim(td)) {
				je->t_rate[2] += td->o.rate[DDIR_TRIM];
				je->t_iops[2] += td->o.rate_iops[DDIR_TRIM];
				je->m_rate[2] += td->o.ratemin[DDIR_TRIM];
				je->m_iops[2] += td->o.rate_iops_min[DDIR_TRIM];
			}

			je->files_open += td->nr_open_files;
		} else if (td->runstate == TD_RAMP) {
			je->nr_running++;
			je->nr_ramp++;
		} else if (td->runstate == TD_SETTING_UP)
			je->nr_setting_up++;
		else if (td->runstate < TD_RUNNING)
			je->nr_pending++;

		if (je->elapsed_sec >= 3)
			eta_secs[i] = thread_eta(td);
		else
			eta_secs[i] = INT_MAX;

		check_str_update(td);

		if (td->runstate > TD_SETTING_UP) {
			int ddir;

			for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
				if (unified_rw_rep) {
					io_bytes[0] += td->io_bytes[ddir];
					io_iops[0] += td->io_blocks[ddir];
				} else {
					io_bytes[ddir] += td->io_bytes[ddir];
					io_iops[ddir] += td->io_blocks[ddir];
				}
			}
		}
	}

	if (exitall_on_terminate)
		je->eta_sec = INT_MAX;
	else
		je->eta_sec = 0;

	for_each_td(td, i) {
		if (exitall_on_terminate) {
			if (eta_secs[i] < je->eta_sec)
				je->eta_sec = eta_secs[i];
		} else {
			if (eta_secs[i] > je->eta_sec)
				je->eta_sec = eta_secs[i];
		}
	}

	free(eta_secs);

	fio_gettime(&now, NULL);
	rate_time = mtime_since(&rate_prev_time, &now);

	if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
		calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
				je->rate);
		memcpy(&rate_prev_time, &now, sizeof(now));
		add_agg_sample(je->rate[DDIR_READ], DDIR_READ, 0);
		add_agg_sample(je->rate[DDIR_WRITE], DDIR_WRITE, 0);
		add_agg_sample(je->rate[DDIR_TRIM], DDIR_TRIM, 0);
	}

	disp_time = mtime_since(&disp_prev_time, &now);

	/*
	 * Allow a little slack; the target is to print it every 1000 msecs.
	 */
	if (!force && disp_time < 900)
		return 0;

	calc_rate(unified_rw_rep, disp_time, io_bytes, disp_io_bytes, je->rate);
	calc_iops(unified_rw_rep, disp_time, io_iops, disp_io_iops, je->iops);

	memcpy(&disp_prev_time, &now, sizeof(now));

	if (!force && !je->nr_running && !je->nr_pending)
		return 0;

	je->nr_threads = thread_number;
	update_condensed_str(__run_str, run_str);
	memcpy(je->run_str, run_str, strlen(run_str));
	return 1;
}
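
/*
 * Minimal usage sketch (hypothetical caller, not part of this file),
 * mirroring what get_jobs_eta()/print_thread_status() below do:
 *
 *	struct jobs_eta *je = calloc(1, sizeof(*je) + THREAD_RUNSTR_SZ);
 *
 *	if (je && calc_thread_status(je, 0))
 *		display_thread_status(je);
 *	free(je);
 *
 * The allocation must leave room past the struct for the condensed run
 * string that is copied into je->run_str.
 */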

void display_thread_status(struct jobs_eta *je)
{
	static struct timeval disp_eta_new_line;
	static int eta_new_line_init, eta_new_line_pending;
	static int linelen_last;
	static int eta_good;
	char output[REAL_MAX_JOBS + 512], *p = output;
	char eta_str[128];
	double perc = 0.0;

	if (je->eta_sec != INT_MAX && je->elapsed_sec) {
		perc = (double) je->elapsed_sec / (double) (je->elapsed_sec + je->eta_sec);
		eta_to_str(eta_str, je->eta_sec);
	}

	if (eta_new_line_pending) {
		eta_new_line_pending = 0;
		p += sprintf(p, "\n");
	}

	p += sprintf(p, "Jobs: %d (f=%d)", je->nr_running, je->files_open);
	if (je->m_rate[0] || je->m_rate[1] || je->t_rate[0] || je->t_rate[1]) {
		char *tr, *mr;

		mr = num2str(je->m_rate[0] + je->m_rate[1], 4, 0, je->is_pow2, 8);
		tr = num2str(je->t_rate[0] + je->t_rate[1], 4, 0, je->is_pow2, 8);
		p += sprintf(p, ", CR=%s/%s KB/s", tr, mr);
		free(tr);
		free(mr);
	} else if (je->m_iops[0] || je->m_iops[1] || je->t_iops[0] || je->t_iops[1]) {
		p += sprintf(p, ", CR=%d/%d IOPS",
					je->t_iops[0] + je->t_iops[1],
					je->m_iops[0] + je->m_iops[1]);
	}
	if (je->eta_sec != INT_MAX && je->nr_running) {
		char perc_str[32];
		char *iops_str[DDIR_RWDIR_CNT];
		char *rate_str[DDIR_RWDIR_CNT];
		size_t left;
		int l;
		int ddir;

		if ((!je->eta_sec && !eta_good) || je->nr_ramp == je->nr_running)
			strcpy(perc_str, "-.-% done");
		else {
			double mult = 100.0;

			if (je->nr_setting_up && je->nr_running)
				mult *= (1.0 - (double) je->nr_setting_up / (double) je->nr_running);

			eta_good = 1;
			perc *= mult;
			sprintf(perc_str, "%3.1f%% done", perc);
		}

		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
			rate_str[ddir] = num2str(je->rate[ddir], 5,
						1024, je->is_pow2, je->unit_base);
			iops_str[ddir] = num2str(je->iops[ddir], 4, 1, 0, 0);
		}

		left = sizeof(output) - (p - output) - 1;

		l = snprintf(p, left, ": [%s] [%s] [%s/%s/%s /s] [%s/%s/%s iops] [eta %s]",
				je->run_str, perc_str, rate_str[DDIR_READ],
				rate_str[DDIR_WRITE], rate_str[DDIR_TRIM],
				iops_str[DDIR_READ], iops_str[DDIR_WRITE],
				iops_str[DDIR_TRIM], eta_str);
		p += l;
		if (l >= 0 && l < linelen_last)
			p += sprintf(p, "%*s", linelen_last - l, "");
		linelen_last = l;

		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
			free(rate_str[ddir]);
			free(iops_str[ddir]);
		}
	}
	p += sprintf(p, "\r");

	printf("%s", output);

	if (!eta_new_line_init) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_init = 1;
	} else if (eta_new_line && mtime_since_now(&disp_eta_new_line) > eta_new_line) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_pending = 1;
	}

	fflush(stdout);
}

struct jobs_eta *get_jobs_eta(int force, size_t *size)
{
	struct jobs_eta *je;

	if (!thread_number)
		return NULL;

	*size = sizeof(*je) + THREAD_RUNSTR_SZ;
	je = malloc(*size);
	if (!je)
		return NULL;
	memset(je, 0, *size);

	if (!calc_thread_status(je, force)) {
		free(je);
		return NULL;
	}

	*size = sizeof(*je) + strlen((char *) je->run_str) + 1;
	return je;
}
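
/*
 * Usage sketch: the caller owns the returned buffer and must free() it,
 * e.g.
 *
 *	size_t size;
 *	struct jobs_eta *je = get_jobs_eta(0, &size);
 *
 *	if (je) {
 *		display_thread_status(je);
 *		free(je);
 *	}
 *
 * which is essentially what print_thread_status() below does.
 */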

void print_thread_status(void)
{
	struct jobs_eta *je;
	size_t size;

	je = get_jobs_eta(0, &size);
	if (je)
		display_thread_status(je);

	free(je);
}

void print_status_init(int thr_number)
{
	__run_str[thr_number] = 'P';
	update_condensed_str(__run_str, run_str);
}