[fio.git] / eta.c
/*
 * Status and ETA code
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>

#include "fio.h"

static char run_str[REAL_MAX_JOBS + 1];

/*
 * Sets the status of the 'td' in the printed status map.
 */
static void check_str_update(struct thread_data *td)
{
        char c = run_str[td->thread_number - 1];

        switch (td->runstate) {
        case TD_REAPED:
                if (td->error)
                        c = 'X';
                else if (td->sig)
                        c = 'K';
                else
                        c = '_';
                break;
        case TD_EXITED:
                c = 'E';
                break;
        case TD_RAMP:
                c = '/';
                break;
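        /*
         * For a running job, lower-case letters denote random IO and
         * upper-case letters sequential IO; 'm'/'M' marks a mixed workload.
         */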
        case TD_RUNNING:
                if (td_rw(td)) {
                        if (td_random(td)) {
                                if (td->o.rwmix[DDIR_READ] == 100)
                                        c = 'r';
                                else if (td->o.rwmix[DDIR_WRITE] == 100)
                                        c = 'w';
                                else
                                        c = 'm';
                        } else {
                                if (td->o.rwmix[DDIR_READ] == 100)
                                        c = 'R';
                                else if (td->o.rwmix[DDIR_WRITE] == 100)
                                        c = 'W';
                                else
                                        c = 'M';
                        }
                } else if (td_read(td)) {
                        if (td_random(td))
                                c = 'r';
                        else
                                c = 'R';
                } else {
                        if (td_random(td))
                                c = 'w';
                        else
                                c = 'W';
                }
                break;
        case TD_PRE_READING:
                c = 'p';
                break;
        case TD_VERIFYING:
                c = 'V';
                break;
        case TD_FSYNCING:
                c = 'F';
                break;
        case TD_CREATED:
                c = 'C';
                break;
        case TD_INITIALIZED:
                c = 'I';
                break;
        case TD_NOT_CREATED:
                c = 'P';
                break;
        default:
                log_err("state %d\n", td->runstate);
        }

        run_str[td->thread_number - 1] = c;
}

/*
 * Convert seconds to a printable string.
 */
static void eta_to_str(char *str, unsigned long eta_sec)
{
        unsigned int d, h, m, s;
        int disp_hour = 0;

        s = eta_sec % 60;
        eta_sec /= 60;
        m = eta_sec % 60;
        eta_sec /= 60;
        h = eta_sec % 24;
        eta_sec /= 24;
        d = eta_sec;

        if (d) {
                disp_hour = 1;
                str += sprintf(str, "%02ud:", d);
        }

        if (h || disp_hour)
                str += sprintf(str, "%02uh:", h);

        str += sprintf(str, "%02um:", m);
        str += sprintf(str, "%02us", s);
}

/*
 * Best effort calculation of the estimated pending runtime of a job.
 */
static int thread_eta(struct thread_data *td)
{
        unsigned long long bytes_total, bytes_done;
        unsigned long eta_sec = 0;
        unsigned long elapsed;

        elapsed = (mtime_since_now(&td->epoch) + 999) / 1000;

        bytes_total = td->total_io_size;

        if (td->o.fill_device && td->o.size == -1ULL) {
                if (!td->fill_device_size || td->fill_device_size == -1ULL)
                        return 0;

                bytes_total = td->fill_device_size;
        }

        /*
         * If writing and verifying, bytes_total will be twice the size. If
         * mixing reads and writes, assume a 50/50 split and thus bytes_total
         * will be 50% larger.
         */
        if (td->o.do_verify && td->o.verify && td_write(td)) {
                if (td_rw(td))
                        bytes_total = bytes_total * 3 / 2;
                else
                        bytes_total <<= 1;
        }

        if (td->o.zone_size && td->o.zone_skip)
                bytes_total /= (td->o.zone_skip / td->o.zone_size);

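        /*
         * For a running (or verifying) job, extrapolate the remaining time
         * from the fraction of the expected IO already completed, and cap
         * the result with the configured runtime if one was given.
         */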
        if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
                double perc, perc_t;

                bytes_done = td->io_bytes[DDIR_READ] + td->io_bytes[DDIR_WRITE];
                perc = (double) bytes_done / (double) bytes_total;
                if (perc > 1.0)
                        perc = 1.0;

                if (td->o.time_based) {
                        perc_t = (double) elapsed / (double) td->o.timeout;
                        if (perc_t < perc)
                                perc = perc_t;
                }

                eta_sec = (unsigned long) (elapsed * (1.0 / perc)) - elapsed;

                if (td->o.timeout &&
                    eta_sec > (td->o.timeout + done_secs - elapsed))
                        eta_sec = td->o.timeout + done_secs - elapsed;
        } else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
                        || td->runstate == TD_INITIALIZED
                        || td->runstate == TD_RAMP
                        || td->runstate == TD_PRE_READING) {
                int t_eta = 0, r_eta = 0;

                /*
                 * We can only guess - assume it'll run the full timeout
                 * if given, otherwise assume it'll run at the specified rate.
                 */
                if (td->o.timeout) {
                        t_eta = td->o.timeout + td->o.start_delay +
                                        td->o.ramp_time;

                        if (in_ramp_time(td)) {
                                unsigned long ramp_left;

                                ramp_left = mtime_since_now(&td->epoch);
                                ramp_left = (ramp_left + 999) / 1000;
                                if (ramp_left <= t_eta)
                                        t_eta -= ramp_left;
                        }
                }
                if (td->o.rate[0] || td->o.rate[1]) {
                        r_eta = (bytes_total / 1024) /
                                        (td->o.rate[0] + td->o.rate[1]);
                        r_eta += td->o.start_delay;
                }

                if (r_eta && t_eta)
                        eta_sec = min(r_eta, t_eta);
                else if (r_eta)
                        eta_sec = r_eta;
                else if (t_eta)
                        eta_sec = t_eta;
                else
                        eta_sec = 0;
        } else {
                /*
                 * thread is already done or waiting for fsync
                 */
                eta_sec = 0;
        }

        return eta_sec;
}

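/*
 * Compute per-direction bandwidth in KB/sec over the last interval and
 * remember the byte counts for the next invocation.
 */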
static void calc_rate(unsigned long mtime, unsigned long long *io_bytes,
                      unsigned long long *prev_io_bytes, unsigned int *rate)
{
        int i;

        for (i = 0; i <= DDIR_WRITE; i++) {
                unsigned long long diff;

                diff = io_bytes[i] - prev_io_bytes[i];
                rate[i] = ((1000 * diff) / mtime) / 1024;
        }
        prev_io_bytes[0] = io_bytes[0];
        prev_io_bytes[1] = io_bytes[1];
}

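/*
 * Compute per-direction IOPS over the last interval and remember the
 * block counts for the next invocation.
 */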
static void calc_iops(unsigned long mtime, unsigned long long *io_iops,
                      unsigned long long *prev_io_iops, unsigned int *iops)
{
        iops[0] = ((io_iops[0] - prev_io_iops[0]) * 1000) / mtime;
        iops[1] = ((io_iops[1] - prev_io_iops[1]) * 1000) / mtime;
        prev_io_iops[0] = io_iops[0];
        prev_io_iops[1] = io_iops[1];
}

/*
 * Print status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
int calc_thread_status(struct jobs_eta *je, int force)
{
        struct thread_data *td;
        int i;
        unsigned long rate_time, disp_time, bw_avg_time, *eta_secs;
        unsigned long long io_bytes[2];
        unsigned long long io_iops[2];
        struct timeval now;

        static unsigned long long rate_io_bytes[2];
        static unsigned long long disp_io_bytes[2];
        static unsigned long long disp_io_iops[2];
        static struct timeval rate_prev_time, disp_prev_time;

        if (!force) {
                if (temp_stall_ts || terse_output || eta_print == FIO_ETA_NEVER)
                        return 0;

                if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))
                        return 0;
        }

        if (!rate_io_bytes[0] && !rate_io_bytes[1])
                fill_start_time(&rate_prev_time);
        if (!disp_io_bytes[0] && !disp_io_bytes[1])
                fill_start_time(&disp_prev_time);

        eta_secs = malloc(thread_number * sizeof(unsigned long));
        memset(eta_secs, 0, thread_number * sizeof(unsigned long));

        je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;

        io_bytes[0] = io_bytes[1] = 0;
        io_iops[0] = io_iops[1] = 0;
        bw_avg_time = ULONG_MAX;
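        /*
         * Walk all jobs: accumulate rate/iops limits and IO byte/block
         * counts, count running vs. pending jobs, and compute a per-job
         * ETA estimate.
         */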
        for_each_td(td, i) {
                if (is_power_of_2(td->o.kb_base))
                        je->is_pow2 = 1;
                if (td->o.bw_avg_time < bw_avg_time)
                        bw_avg_time = td->o.bw_avg_time;
                if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
                    || td->runstate == TD_FSYNCING
                    || td->runstate == TD_PRE_READING) {
                        je->nr_running++;
                        if (td_read(td)) {
                                je->t_rate += td->o.rate[DDIR_READ];
                                je->t_iops += td->o.rate_iops[DDIR_READ];
                                je->m_rate += td->o.ratemin[DDIR_READ];
                                je->m_iops += td->o.rate_iops_min[DDIR_READ];
                        }
                        if (td_write(td)) {
                                je->t_rate += td->o.rate[DDIR_WRITE];
                                je->t_iops += td->o.rate_iops[DDIR_WRITE];
                                je->m_rate += td->o.ratemin[DDIR_WRITE];
                                je->m_iops += td->o.rate_iops_min[DDIR_WRITE];
                        }
                        je->files_open += td->nr_open_files;
                } else if (td->runstate == TD_RAMP) {
                        je->nr_running++;
                        je->nr_ramp++;
                } else if (td->runstate < TD_RUNNING)
                        je->nr_pending++;

                if (je->elapsed_sec >= 3)
                        eta_secs[i] = thread_eta(td);
                else
                        eta_secs[i] = INT_MAX;

                check_str_update(td);

                if (td->runstate > TD_RAMP) {
                        io_bytes[0] += td->io_bytes[0];
                        io_bytes[1] += td->io_bytes[1];
                        io_iops[0] += td->io_blocks[0];
                        io_iops[1] += td->io_blocks[1];
                }
        }

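        /*
         * With exitall_on_terminate the whole run stops when the first job
         * finishes, so report the smallest per-job ETA. Otherwise the run
         * lasts until the slowest job is done, so report the largest.
         */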
        if (exitall_on_terminate)
                je->eta_sec = INT_MAX;
        else
                je->eta_sec = 0;

        for_each_td(td, i) {
                if (exitall_on_terminate) {
                        if (eta_secs[i] < je->eta_sec)
                                je->eta_sec = eta_secs[i];
                } else {
                        if (eta_secs[i] > je->eta_sec)
                                je->eta_sec = eta_secs[i];
                }
        }

        free(eta_secs);

        fio_gettime(&now, NULL);
        rate_time = mtime_since(&rate_prev_time, &now);

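        /*
         * If bandwidth logging is enabled, add an aggregate bandwidth
         * sample roughly once every bw_avg_time msecs while not in ramp
         * time.
         */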
        if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
                calc_rate(rate_time, io_bytes, rate_io_bytes, je->rate);
                memcpy(&rate_prev_time, &now, sizeof(now));
                add_agg_sample(je->rate[DDIR_READ], DDIR_READ, 0);
                add_agg_sample(je->rate[DDIR_WRITE], DDIR_WRITE, 0);
        }

        disp_time = mtime_since(&disp_prev_time, &now);

        /*
         * Allow a little slack, the target is to print it every 1000 msecs
         */
        if (!force && disp_time < 900)
                return 0;

        calc_rate(disp_time, io_bytes, disp_io_bytes, je->rate);
        calc_iops(disp_time, io_iops, disp_io_iops, je->iops);

        memcpy(&disp_prev_time, &now, sizeof(now));

        if (!force && !je->nr_running && !je->nr_pending)
                return 0;

        je->nr_threads = thread_number;
        memcpy(je->run_str, run_str, thread_number * sizeof(char));

        return 1;
}

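/*
 * Format the collected jobs_eta state into a single status line and
 * print it to stdout.
 */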
void display_thread_status(struct jobs_eta *je)
{
        static int linelen_last;
        static int eta_good;
        char output[REAL_MAX_JOBS + 512], *p = output;
        char eta_str[128];
        double perc = 0.0;

        if (je->eta_sec != INT_MAX && je->elapsed_sec) {
                perc = (double) je->elapsed_sec / (double) (je->elapsed_sec + je->eta_sec);
                eta_to_str(eta_str, je->eta_sec);
        }

        p += sprintf(p, "Jobs: %d (f=%d)", je->nr_running, je->files_open);
        if (je->m_rate || je->t_rate) {
                char *tr, *mr;

                mr = num2str(je->m_rate, 4, 0, je->is_pow2);
                tr = num2str(je->t_rate, 4, 0, je->is_pow2);
                p += sprintf(p, ", CR=%s/%s KB/s", tr, mr);
                free(tr);
                free(mr);
        } else if (je->m_iops || je->t_iops)
                p += sprintf(p, ", CR=%d/%d IOPS", je->t_iops, je->m_iops);
        if (je->eta_sec != INT_MAX && je->nr_running) {
                char perc_str[32];
                char *iops_str[2];
                char *rate_str[2];
                size_t left;
                int l;

                if ((!je->eta_sec && !eta_good) || je->nr_ramp == je->nr_running)
                        strcpy(perc_str, "-.-% done");
                else {
                        eta_good = 1;
                        perc *= 100.0;
                        sprintf(perc_str, "%3.1f%% done", perc);
                }

                rate_str[0] = num2str(je->rate[0], 5, 1024, je->is_pow2);
                rate_str[1] = num2str(je->rate[1], 5, 1024, je->is_pow2);

                iops_str[0] = num2str(je->iops[0], 4, 1, 0);
                iops_str[1] = num2str(je->iops[1], 4, 1, 0);

                left = sizeof(output) - (p - output) - 1;

                l = snprintf(p, left, ": [%s] [%s] [%s/%s /s] [%s/%s iops] [eta %s]",
                                je->run_str, perc_str, rate_str[0],
                                rate_str[1], iops_str[0], iops_str[1], eta_str);
                p += l;
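                /*
                 * If this status line came out shorter than the previous
                 * one, pad with spaces so leftover characters from the
                 * longer line are overwritten.
                 */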
                if (l >= 0 && l < linelen_last)
                        p += sprintf(p, "%*s", linelen_last - l, "");
                linelen_last = l;

                free(rate_str[0]);
                free(rate_str[1]);
                free(iops_str[0]);
                free(iops_str[1]);
        }
        p += sprintf(p, "\r");

        printf("%s", output);
        fflush(stdout);
}

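/*
 * Allocate a jobs_eta structure for all known jobs, fill it in via
 * calc_thread_status() and display the result if there is anything to show.
 */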
void print_thread_status(void)
{
        struct jobs_eta *je;
        size_t size;

        if (!thread_number)
                return;

        size = sizeof(*je) + thread_number * sizeof(char) + 1;
        je = malloc(size);
        memset(je, 0, size);

        if (calc_thread_status(je, 0))
                display_thread_status(je);

        free(je);
}

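/*
 * Mark a newly added job as pending ('P') in the status map.
 */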
void print_status_init(int thr_number)
{
        run_str[thr_number] = 'P';
}