Add ddir_rw_sum()
[fio.git] / eta.c
/*
 * Status and ETA code
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>

#include "fio.h"

static char run_str[REAL_MAX_JOBS + 1];

/*
 * Sets the status of the 'td' in the printed status map.
 */
static void check_str_update(struct thread_data *td)
{
	char c = run_str[td->thread_number - 1];

	switch (td->runstate) {
	case TD_REAPED:
		if (td->error)
			c = 'X';
		else if (td->sig)
			c = 'K';
		else
			c = '_';
		break;
	case TD_EXITED:
		c = 'E';
		break;
	case TD_RAMP:
		c = '/';
		break;
	case TD_RUNNING:
		if (td_rw(td)) {
			if (td_random(td)) {
				if (td->o.rwmix[DDIR_READ] == 100)
					c = 'r';
				else if (td->o.rwmix[DDIR_WRITE] == 100)
					c = 'w';
				else
					c = 'm';
			} else {
				if (td->o.rwmix[DDIR_READ] == 100)
					c = 'R';
				else if (td->o.rwmix[DDIR_WRITE] == 100)
					c = 'W';
				else
					c = 'M';
			}
		} else if (td_read(td)) {
			if (td_random(td))
				c = 'r';
			else
				c = 'R';
		} else if (td_write(td)) {
			if (td_random(td))
				c = 'w';
			else
				c = 'W';
		} else {
			if (td_random(td))
				c = 'd';
			else
				c = 'D';
		}
		break;
	case TD_PRE_READING:
		c = 'p';
		break;
	case TD_VERIFYING:
		c = 'V';
		break;
	case TD_FSYNCING:
		c = 'F';
		break;
	case TD_CREATED:
		c = 'C';
		break;
	case TD_INITIALIZED:
		c = 'I';
		break;
	case TD_NOT_CREATED:
		c = 'P';
		break;
	default:
		log_err("state %d\n", td->runstate);
	}

	run_str[td->thread_number - 1] = c;
}
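
/*
 * Quick legend for the map built above: X = reaped with an error,
 * K = reaped after a signal, _ = reaped cleanly, E = exited, / = ramping,
 * R/W/M = sequential read/write/mixed, r/w/m = random read/write/mixed,
 * D/d = sequential/random for the remaining data direction, p = pre-reading,
 * V = verifying, F = fsyncing, C = created, I = initialized, P = not yet
 * started.
 */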

/*
 * Convert seconds to a printable string.
 */
static void eta_to_str(char *str, unsigned long eta_sec)
{
	unsigned int d, h, m, s;
	int disp_hour = 0;

	s = eta_sec % 60;
	eta_sec /= 60;
	m = eta_sec % 60;
	eta_sec /= 60;
	h = eta_sec % 24;
	eta_sec /= 24;
	d = eta_sec;

	if (d) {
		disp_hour = 1;
		str += sprintf(str, "%02ud:", d);
	}

	if (h || disp_hour)
		str += sprintf(str, "%02uh:", h);

	str += sprintf(str, "%02um:", m);
	str += sprintf(str, "%02us", s);
}
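
/*
 * Illustrative examples of the formatting above: eta_sec = 125 prints
 * "02m:05s", while eta_sec = 90061 (one day, hour, minute and second)
 * prints "01d:01h:01m:01s". The hour field is shown whenever the day
 * field is, even if it is zero.
 */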

/*
 * Best effort calculation of the estimated pending runtime of a job.
 */
static int thread_eta(struct thread_data *td)
{
	unsigned long long bytes_total, bytes_done;
	unsigned long eta_sec = 0;
	unsigned long elapsed;

	elapsed = (mtime_since_now(&td->epoch) + 999) / 1000;

	bytes_total = td->total_io_size;

	if (td->o.fill_device && td->o.size == -1ULL) {
		if (!td->fill_device_size || td->fill_device_size == -1ULL)
			return 0;

		bytes_total = td->fill_device_size;
	}

	/*
	 * If verify is enabled and we are writing, everything written will
	 * also be read back, so bytes_total doubles. For a mixed read/write
	 * job, assume a 50/50 split and grow bytes_total by 50% instead.
	 */
	if (td->o.do_verify && td->o.verify && td_write(td)) {
		if (td_rw(td))
			bytes_total = bytes_total * 3 / 2;
		else
			bytes_total <<= 1;
	}
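
	/*
	 * Worked example of the adjustment above (illustrative numbers): a
	 * 1 GiB write-only job with verify reads everything back, so
	 * bytes_total becomes 2 GiB; a 1 GiB mixed read/write job with
	 * verify only re-reads the (assumed 50%) written half, so it
	 * becomes 1.5 GiB.
	 */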

	if (td->o.zone_size && td->o.zone_skip)
		bytes_total /= (td->o.zone_skip / td->o.zone_size);

	if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
		double perc, perc_t;

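		/*
		 * ddir_rw_sum() is the helper this commit introduces; judging
		 * by its uses here, it sums the DDIR_READ/DDIR_WRITE/DDIR_TRIM
		 * entries of a per-data-direction array.
		 */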
		bytes_done = ddir_rw_sum(td->io_bytes);
		perc = (double) bytes_done / (double) bytes_total;
		if (perc > 1.0)
			perc = 1.0;

		if (td->o.time_based) {
			perc_t = (double) elapsed / (double) td->o.timeout;
			if (perc_t < perc)
				perc = perc_t;
		}

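		/*
		 * Extrapolate the total runtime from the fraction done and
		 * report what is left. Illustrative numbers: elapsed = 30s
		 * and perc = 0.25 give 30 * (1 / 0.25) - 30 = 90 seconds
		 * remaining, clamped to the remaining timeout below.
		 */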
		eta_sec = (unsigned long) (elapsed * (1.0 / perc)) - elapsed;

		if (td->o.timeout &&
		    eta_sec > (td->o.timeout + done_secs - elapsed))
			eta_sec = td->o.timeout + done_secs - elapsed;
	} else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
			|| td->runstate == TD_INITIALIZED
			|| td->runstate == TD_RAMP
			|| td->runstate == TD_PRE_READING) {
		int t_eta = 0, r_eta = 0;
		unsigned long long rate_bytes;

		/*
		 * We can only guess - assume it'll run the full timeout
		 * if given, otherwise assume it'll run at the specified rate.
		 */
		if (td->o.timeout) {
			t_eta = td->o.timeout + td->o.start_delay +
					td->o.ramp_time;

			if (in_ramp_time(td)) {
				unsigned long ramp_left;

				ramp_left = mtime_since_now(&td->epoch);
				ramp_left = (ramp_left + 999) / 1000;
				if (ramp_left <= t_eta)
					t_eta -= ramp_left;
			}
		}
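		/*
		 * Rate-based guess, if a rate limit is set. Illustrative
		 * numbers: bytes_total = 1 GiB and a summed rate of 10240
		 * (in the KB units the division below implies) give
		 * r_eta = 1048576 / 10240 = 102 seconds, plus start_delay.
		 */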
		rate_bytes = ddir_rw_sum(td->o.rate);
		if (rate_bytes) {
			r_eta = (bytes_total / 1024) / rate_bytes;
			r_eta += td->o.start_delay;
		}

		if (r_eta && t_eta)
			eta_sec = min(r_eta, t_eta);
		else if (r_eta)
			eta_sec = r_eta;
		else if (t_eta)
			eta_sec = t_eta;
		else
			eta_sec = 0;
	} else {
		/*
		 * thread is already done or waiting for fsync
		 */
		eta_sec = 0;
	}

	return eta_sec;
}

static void calc_rate(unsigned long mtime, unsigned long long *io_bytes,
		      unsigned long long *prev_io_bytes, unsigned int *rate)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff;

		diff = io_bytes[i] - prev_io_bytes[i];
		rate[i] = ((1000 * diff) / mtime) / 1024;

		prev_io_bytes[i] = io_bytes[i];
	}
}
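
/*
 * Example with illustrative numbers: 10 MiB transferred in a 500 msec
 * window gives rate[i] = ((1000 * 10485760) / 500) / 1024 = 20480 KB/s.
 */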

static void calc_iops(unsigned long mtime, unsigned long long *io_iops,
		      unsigned long long *prev_io_iops, unsigned int *iops)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		iops[i] = ((io_iops[i] - prev_io_iops[i]) * 1000) / mtime;
		prev_io_iops[i] = io_iops[i];
	}
}
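
/*
 * Example with illustrative numbers: 2000 I/Os completed in a 500 msec
 * window gives iops[i] = (2000 * 1000) / 500 = 4000 IOPS.
 */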

/*
 * Print status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
int calc_thread_status(struct jobs_eta *je, int force)
{
	struct thread_data *td;
	int i;
	unsigned long rate_time, disp_time, bw_avg_time, *eta_secs;
	unsigned long long io_bytes[DDIR_RWDIR_CNT];
	unsigned long long io_iops[DDIR_RWDIR_CNT];
	struct timeval now;

	static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_iops[DDIR_RWDIR_CNT];
	static struct timeval rate_prev_time, disp_prev_time;

	if (!force) {
		if (temp_stall_ts || terse_output || eta_print == FIO_ETA_NEVER)
			return 0;

		if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))
			return 0;
	}

	if (!ddir_rw_sum(rate_io_bytes))
		fill_start_time(&rate_prev_time);
	if (!ddir_rw_sum(disp_io_bytes))
		fill_start_time(&disp_prev_time);

	eta_secs = malloc(thread_number * sizeof(unsigned long));
	memset(eta_secs, 0, thread_number * sizeof(unsigned long));

	je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;

	io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
	io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
	bw_avg_time = ULONG_MAX;
	for_each_td(td, i) {
		if (is_power_of_2(td->o.kb_base))
			je->is_pow2 = 1;
		if (td->o.bw_avg_time < bw_avg_time)
			bw_avg_time = td->o.bw_avg_time;
		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
		    || td->runstate == TD_FSYNCING
		    || td->runstate == TD_PRE_READING) {
			je->nr_running++;
			if (td_read(td)) {
				je->t_rate += td->o.rate[DDIR_READ];
				je->t_iops += td->o.rate_iops[DDIR_READ];
				je->m_rate += td->o.ratemin[DDIR_READ];
				je->m_iops += td->o.rate_iops_min[DDIR_READ];
			}
			if (td_write(td)) {
				je->t_rate += td->o.rate[DDIR_WRITE];
				je->t_iops += td->o.rate_iops[DDIR_WRITE];
				je->m_rate += td->o.ratemin[DDIR_WRITE];
				je->m_iops += td->o.rate_iops_min[DDIR_WRITE];
			}
			if (td_trim(td)) {
				je->t_rate += td->o.rate[DDIR_TRIM];
				je->t_iops += td->o.rate_iops[DDIR_TRIM];
				je->m_rate += td->o.ratemin[DDIR_TRIM];
				je->m_iops += td->o.rate_iops_min[DDIR_TRIM];
			}

			je->files_open += td->nr_open_files;
		} else if (td->runstate == TD_RAMP) {
			je->nr_running++;
			je->nr_ramp++;
		} else if (td->runstate < TD_RUNNING)
			je->nr_pending++;

		if (je->elapsed_sec >= 3)
			eta_secs[i] = thread_eta(td);
		else
			eta_secs[i] = INT_MAX;

		check_str_update(td);

		if (td->runstate > TD_RAMP) {
			int ddir;
			for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
				io_bytes[ddir] += td->io_bytes[ddir];
				io_iops[ddir] += td->io_blocks[ddir];
			}
		}
	}

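	/*
	 * Aggregate the per-thread estimates: with exitall_on_terminate the
	 * whole run stops as soon as the first job finishes, so take the
	 * smallest ETA; otherwise the run lasts until the slowest job is
	 * done, so take the largest.
	 */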
	if (exitall_on_terminate)
		je->eta_sec = INT_MAX;
	else
		je->eta_sec = 0;

	for_each_td(td, i) {
		if (exitall_on_terminate) {
			if (eta_secs[i] < je->eta_sec)
				je->eta_sec = eta_secs[i];
		} else {
			if (eta_secs[i] > je->eta_sec)
				je->eta_sec = eta_secs[i];
		}
	}

	free(eta_secs);

	fio_gettime(&now, NULL);
	rate_time = mtime_since(&rate_prev_time, &now);

	if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
		calc_rate(rate_time, io_bytes, rate_io_bytes, je->rate);
		memcpy(&rate_prev_time, &now, sizeof(now));
		add_agg_sample(je->rate[DDIR_READ], DDIR_READ, 0);
		add_agg_sample(je->rate[DDIR_WRITE], DDIR_WRITE, 0);
		add_agg_sample(je->rate[DDIR_TRIM], DDIR_TRIM, 0);
	}

	disp_time = mtime_since(&disp_prev_time, &now);

	/*
	 * Allow a little slack, the target is to print it every 1000 msecs
	 */
	if (!force && disp_time < 900)
		return 0;

	calc_rate(disp_time, io_bytes, disp_io_bytes, je->rate);
	calc_iops(disp_time, io_iops, disp_io_iops, je->iops);

	memcpy(&disp_prev_time, &now, sizeof(now));

	if (!force && !je->nr_running && !je->nr_pending)
		return 0;

	je->nr_threads = thread_number;
	memcpy(je->run_str, run_str, thread_number * sizeof(char));

	return 1;
}

void display_thread_status(struct jobs_eta *je)
{
	static int linelen_last;
	static int eta_good;
	char output[REAL_MAX_JOBS + 512], *p = output;
	char eta_str[128];
	double perc = 0.0;

	if (je->eta_sec != INT_MAX && je->elapsed_sec) {
		perc = (double) je->elapsed_sec / (double) (je->elapsed_sec + je->eta_sec);
		eta_to_str(eta_str, je->eta_sec);
	}

	p += sprintf(p, "Jobs: %d (f=%d)", je->nr_running, je->files_open);
	if (je->m_rate || je->t_rate) {
		char *tr, *mr;

		mr = num2str(je->m_rate, 4, 0, je->is_pow2);
		tr = num2str(je->t_rate, 4, 0, je->is_pow2);
		p += sprintf(p, ", CR=%s/%s KB/s", tr, mr);
		free(tr);
		free(mr);
	} else if (je->m_iops || je->t_iops)
		p += sprintf(p, ", CR=%d/%d IOPS", je->t_iops, je->m_iops);
	if (je->eta_sec != INT_MAX && je->nr_running) {
		char perc_str[32];
		char *iops_str[DDIR_RWDIR_CNT];
		char *rate_str[DDIR_RWDIR_CNT];
		size_t left;
		int l;
		int ddir;

		if ((!je->eta_sec && !eta_good) || je->nr_ramp == je->nr_running)
			strcpy(perc_str, "-.-% done");
		else {
			eta_good = 1;
			perc *= 100.0;
			sprintf(perc_str, "%3.1f%% done", perc);
		}

		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
			rate_str[ddir] = num2str(je->rate[ddir], 5,
						1024, je->is_pow2);
			iops_str[ddir] = num2str(je->iops[ddir], 4, 1, 0);
		}

		left = sizeof(output) - (p - output) - 1;

		l = snprintf(p, left, ": [%s] [%s] [%s/%s/%s /s] [%s/%s/%s iops] [eta %s]",
				je->run_str, perc_str, rate_str[DDIR_READ],
				rate_str[DDIR_WRITE], rate_str[DDIR_TRIM],
				iops_str[DDIR_READ], iops_str[DDIR_WRITE],
				iops_str[DDIR_TRIM], eta_str);
		p += l;
		if (l >= 0 && l < linelen_last)
			p += sprintf(p, "%*s", linelen_last - l, "");
		linelen_last = l;

		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
			free(rate_str[ddir]);
			free(iops_str[ddir]);
		}
	}
	p += sprintf(p, "\r");

	printf("%s", output);
	fflush(stdout);
}
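
/*
 * The resulting status line looks roughly like this (illustrative values;
 * the exact number formatting comes from num2str()):
 *
 *   Jobs: 2 (f=2): [RR] [37.5% done] [80M/0K/0K /s] [20K/0/0 iops] [eta 01m:15s]
 */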

void print_thread_status(void)
{
	struct jobs_eta *je;
	size_t size;

	if (!thread_number)
		return;

	size = sizeof(*je) + thread_number * sizeof(char) + 1;
	je = malloc(size);
	memset(je, 0, size);

	if (calc_thread_status(je, 0))
		display_thread_status(je);

	free(je);
}

void print_status_init(int thr_number)
{
	run_str[thr_number] = 'P';
}