io_u: re-invalidate cache when looping around without file open/close
[fio.git] / stat.c
1#include <stdio.h>
2#include <string.h>
3#include <sys/time.h>
4#include <sys/types.h>
5#include <sys/stat.h>
6#include <dirent.h>
7#include <libgen.h>
8#include <math.h>
9
10#include "fio.h"
11#include "diskutil.h"
12#include "lib/ieee754.h"
13#include "json.h"
14#include "lib/getrusage.h"
15#include "idletime.h"
16#include "lib/pow2.h"
17#include "lib/output_buffer.h"
18#include "helper_thread.h"
19#include "smalloc.h"
20
21#define LOG_MSEC_SLACK 10
22
23struct fio_mutex *stat_mutex;
24
25void clear_rusage_stat(struct thread_data *td)
26{
27 struct thread_stat *ts = &td->ts;
28
29 fio_getrusage(&td->ru_start);
30 ts->usr_time = ts->sys_time = 0;
31 ts->ctx = 0;
32 ts->minf = ts->majf = 0;
33}
34
35void update_rusage_stat(struct thread_data *td)
36{
37 struct thread_stat *ts = &td->ts;
38
39 fio_getrusage(&td->ru_end);
40 ts->usr_time += mtime_since_tv(&td->ru_start.ru_utime,
41 &td->ru_end.ru_utime);
42 ts->sys_time += mtime_since_tv(&td->ru_start.ru_stime,
43 &td->ru_end.ru_stime);
44 ts->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw
45 - (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);
46 ts->minf += td->ru_end.ru_minflt - td->ru_start.ru_minflt;
47 ts->majf += td->ru_end.ru_majflt - td->ru_start.ru_majflt;
48
49 memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
50}
51
52/*
53 * Given a latency, return the index of the corresponding bucket in
54 * the structure tracking percentiles.
55 *
56 * (1) find the group (and error bits) that the value (latency)
57 * belongs to by looking at its MSB. (2) find the bucket number in the
58 * group by looking at the index bits.
59 *
60 */
61static unsigned int plat_val_to_idx(unsigned long long val)
62{
63 unsigned int msb, error_bits, base, offset, idx;
64
65 /* Find MSB starting from bit 0 */
66 if (val == 0)
67 msb = 0;
68 else
69 msb = (sizeof(val)*8) - __builtin_clzll(val) - 1;
70
71 /*
 72 * When MSB <= FIO_IO_U_PLAT_BITS, the value cannot be rounded off.
 73 * Use all bits of the sample as the index
74 */
75 if (msb <= FIO_IO_U_PLAT_BITS)
76 return val;
77
78 /* Compute the number of error bits to discard*/
79 error_bits = msb - FIO_IO_U_PLAT_BITS;
80
81 /* Compute the number of buckets before the group */
82 base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;
83
84 /*
85 * Discard the error bits and apply the mask to find the
86 * index for the buckets in the group
87 */
88 offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);
89
90 /* Make sure the index does not exceed (array size - 1) */
91 idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1) ?
92 (base + offset) : (FIO_IO_U_PLAT_NR - 1);
93
94 return idx;
95}
96
97/*
98 * Convert the given index of the bucket array to the value
99 * represented by the bucket
100 */
101static unsigned long long plat_idx_to_val(unsigned int idx)
102{
103 unsigned int error_bits;
104 unsigned long long k, base;
105
106 assert(idx < FIO_IO_U_PLAT_NR);
107
 108 /* MSB <= FIO_IO_U_PLAT_BITS: the sample was stored exactly, so the
 109 * index is the sample value itself */
110 if (idx < (FIO_IO_U_PLAT_VAL << 1))
111 return idx;
112
113 /* Find the group and compute the minimum value of that group */
114 error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1;
115 base = ((unsigned long long) 1) << (error_bits + FIO_IO_U_PLAT_BITS);
116
 117 /* Find the bucket number within the group */
118 k = idx % FIO_IO_U_PLAT_VAL;
119
120 /* Return the mean of the range of the bucket */
121 return base + ((k + 0.5) * (1 << error_bits));
122}
123
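/*
 * Editor's note, an illustrative sketch rather than upstream stat.c code:
 * a worked example of the two mappings above, assuming
 * FIO_IO_U_PLAT_BITS == 6 (so FIO_IO_U_PLAT_VAL == 64); that constant
 * lives in the fio headers and is restated here only as an assumption.
 *
 * plat_val_to_idx(1000):
 *   msb        = 9                         (1000 = 0b1111101000)
 *   error_bits = 9 - 6             = 3
 *   base       = (3 + 1) << 6      = 256
 *   offset     = (1000 >> 3) & 63  = 61
 *   idx        = 256 + 61          = 317
 *
 * plat_idx_to_val(317):
 *   error_bits = (317 >> 6) - 1    = 3
 *   base       = 1ULL << (3 + 6)   = 512
 *   k          = 317 % 64          = 61
 *   value      = 512 + (61 + 0.5) * 8 = 1004
 *
 * So a 1000nsec sample lands in the 8nsec-wide bucket [1000, 1008) and is
 * reported back as the bucket midpoint, 1004nsec.
 */
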
124static int double_cmp(const void *a, const void *b)
125{
126 const fio_fp64_t fa = *(const fio_fp64_t *) a;
127 const fio_fp64_t fb = *(const fio_fp64_t *) b;
128 int cmp = 0;
129
130 if (fa.u.f > fb.u.f)
131 cmp = 1;
132 else if (fa.u.f < fb.u.f)
133 cmp = -1;
134
135 return cmp;
136}
137
138unsigned int calc_clat_percentiles(unsigned int *io_u_plat, unsigned long long nr,
139 fio_fp64_t *plist, unsigned long long **output,
140 unsigned long long *maxv, unsigned long long *minv)
141{
142 unsigned long long sum = 0;
143 unsigned int len, i, j = 0;
144 unsigned int oval_len = 0;
145 unsigned long long *ovals = NULL;
146 bool is_last;
147
148 *minv = -1ULL;
149 *maxv = 0;
150
151 len = 0;
152 while (len < FIO_IO_U_LIST_MAX_LEN && plist[len].u.f != 0.0)
153 len++;
154
155 if (!len)
156 return 0;
157
158 /*
159 * Sort the percentile list. Note that it may already be sorted if
160 * we are using the default values, but since it's a short list this
161 * isn't a worry. Also note that this does not work for NaN values.
162 */
163 if (len > 1)
164 qsort((void *)plist, len, sizeof(plist[0]), double_cmp);
165
166 /*
167 * Calculate bucket values, note down max and min values
168 */
169 is_last = false;
170 for (i = 0; i < FIO_IO_U_PLAT_NR && !is_last; i++) {
171 sum += io_u_plat[i];
172 while (sum >= (plist[j].u.f / 100.0 * nr)) {
173 assert(plist[j].u.f <= 100.0);
174
175 if (j == oval_len) {
176 oval_len += 100;
177 ovals = realloc(ovals, oval_len * sizeof(*ovals));
178 }
179
180 ovals[j] = plat_idx_to_val(i);
181 if (ovals[j] < *minv)
182 *minv = ovals[j];
183 if (ovals[j] > *maxv)
184 *maxv = ovals[j];
185
186 is_last = (j == len - 1) != 0;
187 if (is_last)
188 break;
189
190 j++;
191 }
192 }
193
194 *output = ovals;
195 return len;
196}
197
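/*
 * Editor's note, an illustrative sketch rather than upstream code, kept
 * out of the build with #if 0: roughly how a caller can drive
 * calc_clat_percentiles(). The 50/95/99 list is a stand-in; real callers
 * pass ts->percentile_list, as show_clat_percentiles() does below.
 */
#if 0
static void example_clat_percentiles(struct thread_stat *ts, int ddir)
{
	fio_fp64_t plist[FIO_IO_U_LIST_MAX_LEN] = {
		{ .u.f = 50.0 }, { .u.f = 95.0 }, { .u.f = 99.0 },
	};
	unsigned long long *ovals = NULL;
	unsigned long long minv, maxv;
	unsigned int len, i;

	len = calc_clat_percentiles(ts->io_u_plat[ddir],
				    ts->clat_stat[ddir].samples,
				    plist, &ovals, &maxv, &minv);

	/* ovals[i] is the bucket value (nsec) at the plist[i] percentile */
	for (i = 0; i < len; i++)
		log_info("p%.2f = %llu nsec\n", plist[i].u.f, ovals[i]);

	free(ovals);
}
#endif
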
198/*
199 * Find and display the p-th percentile of clat
200 */
201static void show_clat_percentiles(unsigned int *io_u_plat, unsigned long long nr,
202 fio_fp64_t *plist, unsigned int precision,
203 bool is_clat, struct buf_output *out)
204{
205 unsigned int divisor, len, i, j = 0;
206 unsigned long long minv, maxv;
207 unsigned long long *ovals;
208 int per_line, scale_down, time_width;
209 const char *pre = is_clat ? "clat" : " lat";
210 bool is_last;
211 char fmt[32];
212
213 len = calc_clat_percentiles(io_u_plat, nr, plist, &ovals, &maxv, &minv);
214 if (!len)
215 goto out;
216
217 /*
218 * We default to nsecs, but if the value range is such that we
219 * should scale down to usecs or msecs, do that.
220 */
221 if (minv > 2000000 && maxv > 99999999ULL) {
222 scale_down = 2;
223 divisor = 1000000;
224 log_buf(out, " %s percentiles (msec):\n |", pre);
225 } else if (minv > 2000 && maxv > 99999) {
226 scale_down = 1;
227 divisor = 1000;
228 log_buf(out, " %s percentiles (usec):\n |", pre);
229 } else {
230 scale_down = 0;
231 divisor = 1;
232 log_buf(out, " %s percentiles (nsec):\n |", pre);
233 }
234
235
236 time_width = max(5, (int) (log10(maxv / divisor) + 1));
237 snprintf(fmt, sizeof(fmt), " %%%u.%ufth=[%%%dllu]%%c", precision + 3,
238 precision, time_width);
239 /* fmt will be something like " %5.2fth=[%4llu]%c" */
240 per_line = (80 - 7) / (precision + 10 + time_width);
241
242 for (j = 0; j < len; j++) {
243 /* for formatting */
244 if (j != 0 && (j % per_line) == 0)
245 log_buf(out, " |");
246
247 /* end of the list */
248 is_last = (j == len - 1) != 0;
249
250 for (i = 0; i < scale_down; i++)
251 ovals[j] = (ovals[j] + 999) / 1000;
252
253 log_buf(out, fmt, plist[j].u.f, ovals[j], is_last ? '\n' : ',');
254
255 if (is_last)
256 break;
257
258 if ((j % per_line) == per_line - 1) /* for formatting */
259 log_buf(out, "\n");
260 }
261
262out:
263 if (ovals)
264 free(ovals);
265}
266
267bool calc_lat(struct io_stat *is, unsigned long long *min,
268 unsigned long long *max, double *mean, double *dev)
269{
270 double n = (double) is->samples;
271
272 if (n == 0)
273 return false;
274
275 *min = is->min_val;
276 *max = is->max_val;
277 *mean = is->mean.u.f;
278
279 if (n > 1.0)
280 *dev = sqrt(is->S.u.f / (n - 1.0));
281 else
282 *dev = 0;
283
284 return true;
285}
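
/*
 * Editor's note (illustration only): calc_lat() reports the sample
 * (Bessel-corrected) standard deviation,
 *
 *     dev = sqrt(S / (n - 1)),
 *
 * where S is assumed to be the running sum of squared deviations from the
 * mean that fio accumulates as individual samples are added. With a single
 * sample (n == 1) there is no spread to estimate, so dev is reported as 0.
 */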
286
287void show_group_stats(struct group_run_stats *rs, struct buf_output *out)
288{
289 char *io, *agg, *min, *max;
290 char *ioalt, *aggalt, *minalt, *maxalt;
291 const char *str[] = { " READ", " WRITE" , " TRIM"};
292 int i;
293
294 log_buf(out, "\nRun status group %d (all jobs):\n", rs->groupid);
295
296 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
297 const int i2p = is_power_of_2(rs->kb_base);
298
299 if (!rs->max_run[i])
300 continue;
301
302 io = num2str(rs->iobytes[i], 4, 1, i2p, N2S_BYTE);
303 ioalt = num2str(rs->iobytes[i], 4, 1, !i2p, N2S_BYTE);
304 agg = num2str(rs->agg[i], 4, 1, i2p, rs->unit_base);
305 aggalt = num2str(rs->agg[i], 4, 1, !i2p, rs->unit_base);
306 min = num2str(rs->min_bw[i], 4, 1, i2p, rs->unit_base);
307 minalt = num2str(rs->min_bw[i], 4, 1, !i2p, rs->unit_base);
308 max = num2str(rs->max_bw[i], 4, 1, i2p, rs->unit_base);
309 maxalt = num2str(rs->max_bw[i], 4, 1, !i2p, rs->unit_base);
310 log_buf(out, "%s: bw=%s (%s), %s-%s (%s-%s), io=%s (%s), run=%llu-%llumsec\n",
311 rs->unified_rw_rep ? " MIXED" : str[i],
312 agg, aggalt, min, max, minalt, maxalt, io, ioalt,
313 (unsigned long long) rs->min_run[i],
314 (unsigned long long) rs->max_run[i]);
315
316 free(io);
317 free(agg);
318 free(min);
319 free(max);
320 free(ioalt);
321 free(aggalt);
322 free(minalt);
323 free(maxalt);
324 }
325}
326
327void stat_calc_dist(unsigned int *map, unsigned long total, double *io_u_dist)
328{
329 int i;
330
331 /*
332 * Do depth distribution calculations
333 */
334 for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
335 if (total) {
336 io_u_dist[i] = (double) map[i] / (double) total;
337 io_u_dist[i] *= 100.0;
338 if (io_u_dist[i] < 0.1 && map[i])
339 io_u_dist[i] = 0.1;
340 } else
341 io_u_dist[i] = 0.0;
342 }
343}
344
345static void stat_calc_lat(struct thread_stat *ts, double *dst,
346 unsigned int *src, int nr)
347{
348 unsigned long total = ddir_rw_sum(ts->total_io_u);
349 int i;
350
351 /*
352 * Do latency distribution calculations
353 */
354 for (i = 0; i < nr; i++) {
355 if (total) {
356 dst[i] = (double) src[i] / (double) total;
357 dst[i] *= 100.0;
358 if (dst[i] < 0.01 && src[i])
359 dst[i] = 0.01;
360 } else
361 dst[i] = 0.0;
362 }
363}
364
365/*
366 * To keep the terse format unaltered, add all of the ns latency
367 * buckets to the first us latency bucket
368 */
369void stat_calc_lat_nu(struct thread_stat *ts, double *io_u_lat_u)
370{
371 unsigned long ntotal = 0, total = ddir_rw_sum(ts->total_io_u);
372 int i;
373
374 stat_calc_lat(ts, io_u_lat_u, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);
375
376 for (i = 0; i < FIO_IO_U_LAT_N_NR; i++)
377 ntotal += ts->io_u_lat_n[i];
378
379 io_u_lat_u[0] += 100.0 * (double) ntotal / (double) total;
380}
381
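/*
 * Editor's note (illustration only): if, say, 5% of all I/Os completed in
 * the nanosecond buckets, stat_calc_lat_nu() folds those 5 percentage
 * points into the first microsecond bucket (the one labelled "2=" in the
 * human-readable output), so the terse column layout stays unchanged.
 */
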
382void stat_calc_lat_n(struct thread_stat *ts, double *io_u_lat)
383{
384 stat_calc_lat(ts, io_u_lat, ts->io_u_lat_n, FIO_IO_U_LAT_N_NR);
385}
386
387void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat)
388{
389 stat_calc_lat(ts, io_u_lat, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);
390}
391
392void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat)
393{
394 stat_calc_lat(ts, io_u_lat, ts->io_u_lat_m, FIO_IO_U_LAT_M_NR);
395}
396
397static void display_lat(const char *name, unsigned long long min,
398 unsigned long long max, double mean, double dev,
399 struct buf_output *out)
400{
401 const char *base = "(nsec)";
402 char *minp, *maxp;
403
404 if (nsec_to_msec(&min, &max, &mean, &dev))
405 base = "(msec)";
406 else if (nsec_to_usec(&min, &max, &mean, &dev))
407 base = "(usec)";
408
409 minp = num2str(min, 6, 1, 0, N2S_NONE);
410 maxp = num2str(max, 6, 1, 0, N2S_NONE);
411
412 log_buf(out, " %s %s: min=%s, max=%s, avg=%5.02f,"
413 " stdev=%5.02f\n", name, base, minp, maxp, mean, dev);
414
415 free(minp);
416 free(maxp);
417}
418
419static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
420 int ddir, struct buf_output *out)
421{
422 const char *str[] = { " read", "write", " trim" };
423 unsigned long runt;
424 unsigned long long min, max, bw, iops;
425 double mean, dev;
426 char *io_p, *bw_p, *bw_p_alt, *iops_p;
427 int i2p;
428
429 assert(ddir_rw(ddir));
430
431 if (!ts->runtime[ddir])
432 return;
433
434 i2p = is_power_of_2(rs->kb_base);
435 runt = ts->runtime[ddir];
436
437 bw = (1000 * ts->io_bytes[ddir]) / runt;
438 io_p = num2str(ts->io_bytes[ddir], 4, 1, i2p, N2S_BYTE);
439 bw_p = num2str(bw, 4, 1, i2p, ts->unit_base);
440 bw_p_alt = num2str(bw, 4, 1, !i2p, ts->unit_base);
441
442 iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
443 iops_p = num2str(iops, 4, 1, 0, N2S_NONE);
444
445 log_buf(out, " %s: IOPS=%s, BW=%s (%s)(%s/%llumsec)\n",
446 rs->unified_rw_rep ? "mixed" : str[ddir],
447 iops_p, bw_p, bw_p_alt, io_p,
448 (unsigned long long) ts->runtime[ddir]);
449
450 free(io_p);
451 free(bw_p);
452 free(bw_p_alt);
453 free(iops_p);
454
455 if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
456 display_lat("slat", min, max, mean, dev, out);
457 if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
458 display_lat("clat", min, max, mean, dev, out);
459 if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
460 display_lat(" lat", min, max, mean, dev, out);
461
462 if (ts->clat_percentiles || ts->lat_percentiles) {
463 show_clat_percentiles(ts->io_u_plat[ddir],
464 ts->clat_stat[ddir].samples,
465 ts->percentile_list,
466 ts->percentile_precision,
467 ts->clat_percentiles, out);
468 }
469 if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
470 double p_of_agg = 100.0, fkb_base = (double)rs->kb_base;
471 const char *bw_str;
472
473 if ((rs->unit_base == 1) && i2p)
474 bw_str = "Kibit";
475 else if (rs->unit_base == 1)
476 bw_str = "kbit";
477 else if (i2p)
478 bw_str = "KiB";
479 else
480 bw_str = "kB";
481
482 if (rs->agg[ddir]) {
483 p_of_agg = mean * 100 / (double) (rs->agg[ddir] / 1024);
484 if (p_of_agg > 100.0)
485 p_of_agg = 100.0;
486 }
487
488 if (rs->unit_base == 1) {
489 min *= 8.0;
490 max *= 8.0;
491 mean *= 8.0;
492 dev *= 8.0;
493 }
494
495 if (mean > fkb_base * fkb_base) {
496 min /= fkb_base;
497 max /= fkb_base;
498 mean /= fkb_base;
499 dev /= fkb_base;
500 bw_str = (rs->unit_base == 1 ? "Mibit" : "MiB");
501 }
502
503 log_buf(out, " bw (%5s/s): min=%5llu, max=%5llu, per=%3.2f%%, "
504 "avg=%5.02f, stdev=%5.02f, samples=%" PRIu64 "\n",
505 bw_str, min, max, p_of_agg, mean, dev,
506 (&ts->bw_stat[ddir])->samples);
507 }
508 if (calc_lat(&ts->iops_stat[ddir], &min, &max, &mean, &dev)) {
509 log_buf(out, " iops : min=%5llu, max=%5llu, "
510 "avg=%5.02f, stdev=%5.02f, samples=%" PRIu64 "\n",
511 min, max, mean, dev, (&ts->iops_stat[ddir])->samples);
512 }
513}
514
515static bool show_lat(double *io_u_lat, int nr, const char **ranges,
516 const char *msg, struct buf_output *out)
517{
518 bool new_line = true, shown = false;
519 int i, line = 0;
520
521 for (i = 0; i < nr; i++) {
522 if (io_u_lat[i] <= 0.0)
523 continue;
524 shown = true;
525 if (new_line) {
526 if (line)
527 log_buf(out, "\n");
528 log_buf(out, " lat (%s) : ", msg);
529 new_line = false;
530 line = 0;
531 }
532 if (line)
533 log_buf(out, ", ");
534 log_buf(out, "%s%3.2f%%", ranges[i], io_u_lat[i]);
535 line++;
536 if (line == 5)
537 new_line = true;
538 }
539
540 if (shown)
541 log_buf(out, "\n");
542
543 return true;
544}
545
546static void show_lat_n(double *io_u_lat_n, struct buf_output *out)
547{
548 const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
549 "250=", "500=", "750=", "1000=", };
550
551 show_lat(io_u_lat_n, FIO_IO_U_LAT_N_NR, ranges, "nsec", out);
552}
553
554static void show_lat_u(double *io_u_lat_u, struct buf_output *out)
555{
556 const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
557 "250=", "500=", "750=", "1000=", };
558
559 show_lat(io_u_lat_u, FIO_IO_U_LAT_U_NR, ranges, "usec", out);
560}
561
562static void show_lat_m(double *io_u_lat_m, struct buf_output *out)
563{
564 const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
565 "250=", "500=", "750=", "1000=", "2000=",
566 ">=2000=", };
567
568 show_lat(io_u_lat_m, FIO_IO_U_LAT_M_NR, ranges, "msec", out);
569}
570
571static void show_latencies(struct thread_stat *ts, struct buf_output *out)
572{
573 double io_u_lat_n[FIO_IO_U_LAT_N_NR];
574 double io_u_lat_u[FIO_IO_U_LAT_U_NR];
575 double io_u_lat_m[FIO_IO_U_LAT_M_NR];
576
577 stat_calc_lat_n(ts, io_u_lat_n);
578 stat_calc_lat_u(ts, io_u_lat_u);
579 stat_calc_lat_m(ts, io_u_lat_m);
580
581 show_lat_n(io_u_lat_n, out);
582 show_lat_u(io_u_lat_u, out);
583 show_lat_m(io_u_lat_m, out);
584}
585
586static int block_state_category(int block_state)
587{
588 switch (block_state) {
589 case BLOCK_STATE_UNINIT:
590 return 0;
591 case BLOCK_STATE_TRIMMED:
592 case BLOCK_STATE_WRITTEN:
593 return 1;
594 case BLOCK_STATE_WRITE_FAILURE:
595 case BLOCK_STATE_TRIM_FAILURE:
596 return 2;
597 default:
598 /* Silence compile warning on some BSDs and have a return */
599 assert(0);
600 return -1;
601 }
602}
603
604static int compare_block_infos(const void *bs1, const void *bs2)
605{
606 uint32_t block1 = *(uint32_t *)bs1;
607 uint32_t block2 = *(uint32_t *)bs2;
608 int state1 = BLOCK_INFO_STATE(block1);
609 int state2 = BLOCK_INFO_STATE(block2);
610 int bscat1 = block_state_category(state1);
611 int bscat2 = block_state_category(state2);
612 int cycles1 = BLOCK_INFO_TRIMS(block1);
613 int cycles2 = BLOCK_INFO_TRIMS(block2);
614
615 if (bscat1 < bscat2)
616 return -1;
617 if (bscat1 > bscat2)
618 return 1;
619
620 if (cycles1 < cycles2)
621 return -1;
622 if (cycles1 > cycles2)
623 return 1;
624
625 if (state1 < state2)
626 return -1;
627 if (state1 > state2)
628 return 1;
629
630 assert(block1 == block2);
631 return 0;
632}
633
634static int calc_block_percentiles(int nr_block_infos, uint32_t *block_infos,
635 fio_fp64_t *plist, unsigned int **percentiles,
636 unsigned int *types)
637{
638 int len = 0;
639 int i, nr_uninit;
640
641 qsort(block_infos, nr_block_infos, sizeof(uint32_t), compare_block_infos);
642
643 while (len < FIO_IO_U_LIST_MAX_LEN && plist[len].u.f != 0.0)
644 len++;
645
646 if (!len)
647 return 0;
648
649 /*
650 * Sort the percentile list. Note that it may already be sorted if
651 * we are using the default values, but since it's a short list this
652 * isn't a worry. Also note that this does not work for NaN values.
653 */
654 if (len > 1)
655 qsort((void *)plist, len, sizeof(plist[0]), double_cmp);
656
657 nr_uninit = 0;
658 /* Start only after the uninit entries end */
659 for (nr_uninit = 0;
660 nr_uninit < nr_block_infos
661 && BLOCK_INFO_STATE(block_infos[nr_uninit]) == BLOCK_STATE_UNINIT;
662 nr_uninit ++)
663 ;
664
665 if (nr_uninit == nr_block_infos)
666 return 0;
667
668 *percentiles = calloc(len, sizeof(**percentiles));
669
670 for (i = 0; i < len; i++) {
671 int idx = (plist[i].u.f * (nr_block_infos - nr_uninit) / 100)
672 + nr_uninit;
673 (*percentiles)[i] = BLOCK_INFO_TRIMS(block_infos[idx]);
674 }
675
676 memset(types, 0, sizeof(*types) * BLOCK_STATE_COUNT);
677 for (i = 0; i < nr_block_infos; i++)
678 types[BLOCK_INFO_STATE(block_infos[i])]++;
679
680 return len;
681}
682
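/*
 * Editor's note (illustration only, made-up numbers): with the block infos
 * sorted so that all BLOCK_STATE_UNINIT entries come first, a percentile is
 * read straight out of the sorted tail. For nr_block_infos = 1000,
 * nr_uninit = 200 and plist[i] = 90.0:
 *
 *     idx = 90.0 * (1000 - 200) / 100 + 200 = 920
 *
 * so (*percentiles)[i] is the trim count of the block ranked at the 90th
 * percentile among the 800 blocks that were actually written or trimmed.
 */
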
683static const char *block_state_names[] = {
684 [BLOCK_STATE_UNINIT] = "unwritten",
685 [BLOCK_STATE_TRIMMED] = "trimmed",
686 [BLOCK_STATE_WRITTEN] = "written",
687 [BLOCK_STATE_TRIM_FAILURE] = "trim failure",
688 [BLOCK_STATE_WRITE_FAILURE] = "write failure",
689};
690
691static void show_block_infos(int nr_block_infos, uint32_t *block_infos,
692 fio_fp64_t *plist, struct buf_output *out)
693{
694 int len, pos, i;
695 unsigned int *percentiles = NULL;
696 unsigned int block_state_counts[BLOCK_STATE_COUNT];
697
698 len = calc_block_percentiles(nr_block_infos, block_infos, plist,
699 &percentiles, block_state_counts);
700
701 log_buf(out, " block lifetime percentiles :\n |");
702 pos = 0;
703 for (i = 0; i < len; i++) {
704 uint32_t block_info = percentiles[i];
705#define LINE_LENGTH 75
706 char str[LINE_LENGTH];
707 int strln = snprintf(str, LINE_LENGTH, " %3.2fth=%u%c",
708 plist[i].u.f, block_info,
709 i == len - 1 ? '\n' : ',');
710 assert(strln < LINE_LENGTH);
711 if (pos + strln > LINE_LENGTH) {
712 pos = 0;
713 log_buf(out, "\n |");
714 }
715 log_buf(out, "%s", str);
716 pos += strln;
717#undef LINE_LENGTH
718 }
719 if (percentiles)
720 free(percentiles);
721
722 log_buf(out, " states :");
723 for (i = 0; i < BLOCK_STATE_COUNT; i++)
724 log_buf(out, " %s=%u%c",
725 block_state_names[i], block_state_counts[i],
726 i == BLOCK_STATE_COUNT - 1 ? '\n' : ',');
727}
728
729static void show_ss_normal(struct thread_stat *ts, struct buf_output *out)
730{
731 char *p1, *p1alt, *p2;
732 unsigned long long bw_mean, iops_mean;
733 const int i2p = is_power_of_2(ts->kb_base);
734
735 if (!ts->ss_dur)
736 return;
737
738 bw_mean = steadystate_bw_mean(ts);
739 iops_mean = steadystate_iops_mean(ts);
740
741 p1 = num2str(bw_mean / ts->kb_base, 4, ts->kb_base, i2p, ts->unit_base);
742 p1alt = num2str(bw_mean / ts->kb_base, 4, ts->kb_base, !i2p, ts->unit_base);
743 p2 = num2str(iops_mean, 4, 1, 0, N2S_NONE);
744
745 log_buf(out, " steadystate : attained=%s, bw=%s (%s), iops=%s, %s%s=%.3f%s\n",
746 ts->ss_state & __FIO_SS_ATTAINED ? "yes" : "no",
747 p1, p1alt, p2,
748 ts->ss_state & __FIO_SS_IOPS ? "iops" : "bw",
749 ts->ss_state & __FIO_SS_SLOPE ? " slope": " mean dev",
750 ts->ss_criterion.u.f,
751 ts->ss_state & __FIO_SS_PCT ? "%" : "");
752
753 free(p1);
754 free(p1alt);
755 free(p2);
756}
757
758static void show_thread_status_normal(struct thread_stat *ts,
759 struct group_run_stats *rs,
760 struct buf_output *out)
761{
762 double usr_cpu, sys_cpu;
763 unsigned long runtime;
764 double io_u_dist[FIO_IO_U_MAP_NR];
765 time_t time_p;
766 char time_buf[32];
767
768 if (!ddir_rw_sum(ts->io_bytes) && !ddir_rw_sum(ts->total_io_u))
769 return;
770
771 memset(time_buf, 0, sizeof(time_buf));
772
773 time(&time_p);
774 os_ctime_r((const time_t *) &time_p, time_buf, sizeof(time_buf));
775
776 if (!ts->error) {
777 log_buf(out, "%s: (groupid=%d, jobs=%d): err=%2d: pid=%d: %s",
778 ts->name, ts->groupid, ts->members,
779 ts->error, (int) ts->pid, time_buf);
780 } else {
781 log_buf(out, "%s: (groupid=%d, jobs=%d): err=%2d (%s): pid=%d: %s",
782 ts->name, ts->groupid, ts->members,
783 ts->error, ts->verror, (int) ts->pid,
784 time_buf);
785 }
786
787 if (strlen(ts->description))
788 log_buf(out, " Description : [%s]\n", ts->description);
789
790 if (ts->io_bytes[DDIR_READ])
791 show_ddir_status(rs, ts, DDIR_READ, out);
792 if (ts->io_bytes[DDIR_WRITE])
793 show_ddir_status(rs, ts, DDIR_WRITE, out);
794 if (ts->io_bytes[DDIR_TRIM])
795 show_ddir_status(rs, ts, DDIR_TRIM, out);
796
797 show_latencies(ts, out);
798
799 runtime = ts->total_run_time;
800 if (runtime) {
801 double runt = (double) runtime;
802
803 usr_cpu = (double) ts->usr_time * 100 / runt;
804 sys_cpu = (double) ts->sys_time * 100 / runt;
805 } else {
806 usr_cpu = 0;
807 sys_cpu = 0;
808 }
809
810 log_buf(out, " cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%llu,"
811 " majf=%llu, minf=%llu\n", usr_cpu, sys_cpu,
812 (unsigned long long) ts->ctx,
813 (unsigned long long) ts->majf,
814 (unsigned long long) ts->minf);
815
816 stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
817 log_buf(out, " IO depths : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%,"
818 " 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
819 io_u_dist[1], io_u_dist[2],
820 io_u_dist[3], io_u_dist[4],
821 io_u_dist[5], io_u_dist[6]);
822
823 stat_calc_dist(ts->io_u_submit, ts->total_submit, io_u_dist);
824 log_buf(out, " submit : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
825 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
826 io_u_dist[1], io_u_dist[2],
827 io_u_dist[3], io_u_dist[4],
828 io_u_dist[5], io_u_dist[6]);
829 stat_calc_dist(ts->io_u_complete, ts->total_complete, io_u_dist);
830 log_buf(out, " complete : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
831 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
832 io_u_dist[1], io_u_dist[2],
833 io_u_dist[3], io_u_dist[4],
834 io_u_dist[5], io_u_dist[6]);
835 log_buf(out, " issued rwt: total=%llu,%llu,%llu,"
836 " short=%llu,%llu,%llu,"
837 " dropped=%llu,%llu,%llu\n",
838 (unsigned long long) ts->total_io_u[0],
839 (unsigned long long) ts->total_io_u[1],
840 (unsigned long long) ts->total_io_u[2],
841 (unsigned long long) ts->short_io_u[0],
842 (unsigned long long) ts->short_io_u[1],
843 (unsigned long long) ts->short_io_u[2],
844 (unsigned long long) ts->drop_io_u[0],
845 (unsigned long long) ts->drop_io_u[1],
846 (unsigned long long) ts->drop_io_u[2]);
847 if (ts->continue_on_error) {
848 log_buf(out, " errors : total=%llu, first_error=%d/<%s>\n",
849 (unsigned long long)ts->total_err_count,
850 ts->first_error,
851 strerror(ts->first_error));
852 }
853 if (ts->latency_depth) {
854 log_buf(out, " latency : target=%llu, window=%llu, percentile=%.2f%%, depth=%u\n",
855 (unsigned long long)ts->latency_target,
856 (unsigned long long)ts->latency_window,
857 ts->latency_percentile.u.f,
858 ts->latency_depth);
859 }
860
861 if (ts->nr_block_infos)
862 show_block_infos(ts->nr_block_infos, ts->block_infos,
863 ts->percentile_list, out);
864
865 if (ts->ss_dur)
866 show_ss_normal(ts, out);
867}
868
869static void show_ddir_status_terse(struct thread_stat *ts,
870 struct group_run_stats *rs, int ddir,
871 int ver, struct buf_output *out)
872{
873 unsigned long long min, max, minv, maxv, bw, iops;
874 unsigned long long *ovals = NULL;
875 double mean, dev;
876 unsigned int len;
877 int i, bw_stat;
878
879 assert(ddir_rw(ddir));
880
881 iops = bw = 0;
882 if (ts->runtime[ddir]) {
883 uint64_t runt = ts->runtime[ddir];
884
885 bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024; /* KiB/s */
886 iops = (1000 * (uint64_t) ts->total_io_u[ddir]) / runt;
887 }
888
889 log_buf(out, ";%llu;%llu;%llu;%llu",
890 (unsigned long long) ts->io_bytes[ddir] >> 10, bw, iops,
891 (unsigned long long) ts->runtime[ddir]);
892
893 if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
894 log_buf(out, ";%llu;%llu;%f;%f", min/1000, max/1000, mean/1000, dev/1000);
895 else
896 log_buf(out, ";%llu;%llu;%f;%f", 0ULL, 0ULL, 0.0, 0.0);
897
898 if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
899 log_buf(out, ";%llu;%llu;%f;%f", min/1000, max/1000, mean/1000, dev/1000);
900 else
901 log_buf(out, ";%llu;%llu;%f;%f", 0ULL, 0ULL, 0.0, 0.0);
902
903 if (ts->clat_percentiles || ts->lat_percentiles) {
904 len = calc_clat_percentiles(ts->io_u_plat[ddir],
905 ts->clat_stat[ddir].samples,
906 ts->percentile_list, &ovals, &maxv,
907 &minv);
908 } else
909 len = 0;
910
911 for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
912 if (i >= len) {
913 log_buf(out, ";0%%=0");
914 continue;
915 }
916 log_buf(out, ";%f%%=%llu", ts->percentile_list[i].u.f, ovals[i]/1000);
917 }
918
919 if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
920 log_buf(out, ";%llu;%llu;%f;%f", min/1000, max/1000, mean/1000, dev/1000);
921 else
922 log_buf(out, ";%llu;%llu;%f;%f", 0ULL, 0ULL, 0.0, 0.0);
923
924 if (ovals)
925 free(ovals);
926
927 bw_stat = calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev);
928 if (bw_stat) {
929 double p_of_agg = 100.0;
930
931 if (rs->agg[ddir]) {
932 p_of_agg = mean * 100 / (double) (rs->agg[ddir] / 1024);
933 if (p_of_agg > 100.0)
934 p_of_agg = 100.0;
935 }
936
937 log_buf(out, ";%llu;%llu;%f%%;%f;%f", min, max, p_of_agg, mean, dev);
938 } else
939 log_buf(out, ";%llu;%llu;%f%%;%f;%f", 0ULL, 0ULL, 0.0, 0.0, 0.0);
940
941 if (ver == 5) {
942 if (bw_stat)
943 log_buf(out, ";%" PRIu64, (&ts->bw_stat[ddir])->samples);
944 else
945 log_buf(out, ";%lu", 0UL);
946
947 if (calc_lat(&ts->iops_stat[ddir], &min, &max, &mean, &dev))
948 log_buf(out, ";%llu;%llu;%f;%f;%" PRIu64, min, max,
949 mean, dev, (&ts->iops_stat[ddir])->samples);
950 else
951 log_buf(out, ";%llu;%llu;%f;%f;%lu", 0ULL, 0ULL, 0.0, 0.0, 0UL);
952 }
953}
954
955static void add_ddir_status_json(struct thread_stat *ts,
956 struct group_run_stats *rs, int ddir, struct json_object *parent)
957{
958 unsigned long long min, max, minv, maxv;
959 unsigned long long bw;
960 unsigned long long *ovals = NULL;
961 double mean, dev, iops;
962 unsigned int len;
963 int i;
964 const char *ddirname[] = {"read", "write", "trim"};
965 struct json_object *dir_object, *tmp_object, *percentile_object, *clat_bins_object = NULL;
966 char buf[120];
967 double p_of_agg = 100.0;
968
969 assert(ddir_rw(ddir));
970
971 if (ts->unified_rw_rep && ddir != DDIR_READ)
972 return;
973
974 dir_object = json_create_object();
975 json_object_add_value_object(parent,
976 ts->unified_rw_rep ? "mixed" : ddirname[ddir], dir_object);
977
978 bw = 0;
979 iops = 0.0;
980 if (ts->runtime[ddir]) {
981 uint64_t runt = ts->runtime[ddir];
982
983 bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024; /* KiB/s */
984 iops = (1000.0 * (uint64_t) ts->total_io_u[ddir]) / runt;
985 }
986
987 json_object_add_value_int(dir_object, "io_bytes", ts->io_bytes[ddir]);
988 json_object_add_value_int(dir_object, "io_kbytes", ts->io_bytes[ddir] >> 10);
989 json_object_add_value_int(dir_object, "bw", bw);
990 json_object_add_value_float(dir_object, "iops", iops);
991 json_object_add_value_int(dir_object, "runtime", ts->runtime[ddir]);
992 json_object_add_value_int(dir_object, "total_ios", ts->total_io_u[ddir]);
993 json_object_add_value_int(dir_object, "short_ios", ts->short_io_u[ddir]);
994 json_object_add_value_int(dir_object, "drop_ios", ts->drop_io_u[ddir]);
995
996 if (!calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev)) {
997 min = max = 0;
998 mean = dev = 0.0;
999 }
1000 tmp_object = json_create_object();
1001 json_object_add_value_object(dir_object, "slat_ns", tmp_object);
1002 json_object_add_value_int(tmp_object, "min", min);
1003 json_object_add_value_int(tmp_object, "max", max);
1004 json_object_add_value_float(tmp_object, "mean", mean);
1005 json_object_add_value_float(tmp_object, "stddev", dev);
1006
1007 if (!calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev)) {
1008 min = max = 0;
1009 mean = dev = 0.0;
1010 }
1011 tmp_object = json_create_object();
1012 json_object_add_value_object(dir_object, "clat_ns", tmp_object);
1013 json_object_add_value_int(tmp_object, "min", min);
1014 json_object_add_value_int(tmp_object, "max", max);
1015 json_object_add_value_float(tmp_object, "mean", mean);
1016 json_object_add_value_float(tmp_object, "stddev", dev);
1017
1018 if (ts->clat_percentiles || ts->lat_percentiles) {
1019 len = calc_clat_percentiles(ts->io_u_plat[ddir],
1020 ts->clat_stat[ddir].samples,
1021 ts->percentile_list, &ovals, &maxv,
1022 &minv);
1023 } else
1024 len = 0;
1025
1026 percentile_object = json_create_object();
1027 json_object_add_value_object(tmp_object, "percentile", percentile_object);
1028 for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
1029 if (i >= len) {
1030 json_object_add_value_int(percentile_object, "0.00", 0);
1031 continue;
1032 }
1033 snprintf(buf, sizeof(buf), "%f", ts->percentile_list[i].u.f);
1034 json_object_add_value_int(percentile_object, (const char *)buf, ovals[i]);
1035 }
1036
1037 if (output_format & FIO_OUTPUT_JSON_PLUS) {
1038 clat_bins_object = json_create_object();
1039 if (ts->clat_percentiles)
1040 json_object_add_value_object(tmp_object, "bins", clat_bins_object);
1041
1042 for(i = 0; i < FIO_IO_U_PLAT_NR; i++) {
1043 if (ts->io_u_plat[ddir][i]) {
1044 snprintf(buf, sizeof(buf), "%llu", plat_idx_to_val(i));
1045 json_object_add_value_int(clat_bins_object, (const char *)buf, ts->io_u_plat[ddir][i]);
1046 }
1047 }
1048 }
1049
1050 if (!calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev)) {
1051 min = max = 0;
1052 mean = dev = 0.0;
1053 }
1054 tmp_object = json_create_object();
1055 json_object_add_value_object(dir_object, "lat_ns", tmp_object);
1056 json_object_add_value_int(tmp_object, "min", min);
1057 json_object_add_value_int(tmp_object, "max", max);
1058 json_object_add_value_float(tmp_object, "mean", mean);
1059 json_object_add_value_float(tmp_object, "stddev", dev);
1060 if (output_format & FIO_OUTPUT_JSON_PLUS && ts->lat_percentiles)
1061 json_object_add_value_object(tmp_object, "bins", clat_bins_object);
1062
1063 if (ovals)
1064 free(ovals);
1065
1066 if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
1067 if (rs->agg[ddir]) {
1068 p_of_agg = mean * 100 / (double) (rs->agg[ddir] / 1024);
1069 if (p_of_agg > 100.0)
1070 p_of_agg = 100.0;
1071 }
1072 } else {
1073 min = max = 0;
1074 p_of_agg = mean = dev = 0.0;
1075 }
1076 json_object_add_value_int(dir_object, "bw_min", min);
1077 json_object_add_value_int(dir_object, "bw_max", max);
1078 json_object_add_value_float(dir_object, "bw_agg", p_of_agg);
1079 json_object_add_value_float(dir_object, "bw_mean", mean);
1080 json_object_add_value_float(dir_object, "bw_dev", dev);
1081 json_object_add_value_int(dir_object, "bw_samples",
1082 (&ts->bw_stat[ddir])->samples);
1083
1084 if (!calc_lat(&ts->iops_stat[ddir], &min, &max, &mean, &dev)) {
1085 min = max = 0;
1086 mean = dev = 0.0;
1087 }
1088 json_object_add_value_int(dir_object, "iops_min", min);
1089 json_object_add_value_int(dir_object, "iops_max", max);
1090 json_object_add_value_float(dir_object, "iops_mean", mean);
1091 json_object_add_value_float(dir_object, "iops_stddev", dev);
1092 json_object_add_value_int(dir_object, "iops_samples",
1093 (&ts->iops_stat[ddir])->samples);
1094}
1095
1096static void show_thread_status_terse_all(struct thread_stat *ts,
1097 struct group_run_stats *rs, int ver,
1098 struct buf_output *out)
1099{
1100 double io_u_dist[FIO_IO_U_MAP_NR];
1101 double io_u_lat_u[FIO_IO_U_LAT_U_NR];
1102 double io_u_lat_m[FIO_IO_U_LAT_M_NR];
1103 double usr_cpu, sys_cpu;
1104 int i;
1105
1106 /* General Info */
1107 if (ver == 2)
1108 log_buf(out, "2;%s;%d;%d", ts->name, ts->groupid, ts->error);
1109 else
1110 log_buf(out, "%d;%s;%s;%d;%d", ver, fio_version_string,
1111 ts->name, ts->groupid, ts->error);
1112
1113 /* Log Read Status */
1114 show_ddir_status_terse(ts, rs, DDIR_READ, ver, out);
1115 /* Log Write Status */
1116 show_ddir_status_terse(ts, rs, DDIR_WRITE, ver, out);
1117 /* Log Trim Status */
1118 if (ver == 2 || ver == 4 || ver == 5)
1119 show_ddir_status_terse(ts, rs, DDIR_TRIM, ver, out);
1120
1121 /* CPU Usage */
1122 if (ts->total_run_time) {
1123 double runt = (double) ts->total_run_time;
1124
1125 usr_cpu = (double) ts->usr_time * 100 / runt;
1126 sys_cpu = (double) ts->sys_time * 100 / runt;
1127 } else {
1128 usr_cpu = 0;
1129 sys_cpu = 0;
1130 }
1131
1132 log_buf(out, ";%f%%;%f%%;%llu;%llu;%llu", usr_cpu, sys_cpu,
1133 (unsigned long long) ts->ctx,
1134 (unsigned long long) ts->majf,
1135 (unsigned long long) ts->minf);
1136
1137 /* Calc % distribution of IO depths, usecond, msecond latency */
1138 stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
1139 stat_calc_lat_nu(ts, io_u_lat_u);
1140 stat_calc_lat_m(ts, io_u_lat_m);
1141
1142 /* Only show fixed 7 I/O depth levels*/
1143 log_buf(out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%",
1144 io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
1145 io_u_dist[4], io_u_dist[5], io_u_dist[6]);
1146
1147 /* Microsecond latency */
1148 for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
1149 log_buf(out, ";%3.2f%%", io_u_lat_u[i]);
1150 /* Millisecond latency */
1151 for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
1152 log_buf(out, ";%3.2f%%", io_u_lat_m[i]);
1153
1154 /* disk util stats, if any */
1155 if (ver >= 3)
1156 show_disk_util(1, NULL, out);
1157
1158 /* Additional output if continue_on_error set - default off*/
1159 if (ts->continue_on_error)
1160 log_buf(out, ";%llu;%d", (unsigned long long) ts->total_err_count, ts->first_error);
1161 if (ver == 2)
1162 log_buf(out, "\n");
1163
1164 /* Additional output if description is set */
1165 if (strlen(ts->description))
1166 log_buf(out, ";%s", ts->description);
1167
1168 log_buf(out, "\n");
1169}
1170
1171static void json_add_job_opts(struct json_object *root, const char *name,
1172 struct flist_head *opt_list, bool num_jobs)
1173{
1174 struct json_object *dir_object;
1175 struct flist_head *entry;
1176 struct print_option *p;
1177
1178 if (flist_empty(opt_list))
1179 return;
1180
1181 dir_object = json_create_object();
1182 json_object_add_value_object(root, name, dir_object);
1183
1184 flist_for_each(entry, opt_list) {
1185 const char *pos = "";
1186
1187 p = flist_entry(entry, struct print_option, list);
1188 if (!num_jobs && !strcmp(p->name, "numjobs"))
1189 continue;
1190 if (p->value)
1191 pos = p->value;
1192 json_object_add_value_string(dir_object, p->name, pos);
1193 }
1194}
1195
1196static struct json_object *show_thread_status_json(struct thread_stat *ts,
1197 struct group_run_stats *rs,
1198 struct flist_head *opt_list)
1199{
1200 struct json_object *root, *tmp;
1201 struct jobs_eta *je;
1202 double io_u_dist[FIO_IO_U_MAP_NR];
1203 double io_u_lat_n[FIO_IO_U_LAT_N_NR];
1204 double io_u_lat_u[FIO_IO_U_LAT_U_NR];
1205 double io_u_lat_m[FIO_IO_U_LAT_M_NR];
1206 double usr_cpu, sys_cpu;
1207 int i;
1208 size_t size;
1209
1210 root = json_create_object();
1211 json_object_add_value_string(root, "jobname", ts->name);
1212 json_object_add_value_int(root, "groupid", ts->groupid);
1213 json_object_add_value_int(root, "error", ts->error);
1214
1215 /* ETA Info */
1216 je = get_jobs_eta(true, &size);
1217 if (je) {
1218 json_object_add_value_int(root, "eta", je->eta_sec);
1219 json_object_add_value_int(root, "elapsed", je->elapsed_sec);
1220 }
1221
1222 if (opt_list)
1223 json_add_job_opts(root, "job options", opt_list, true);
1224
1225 add_ddir_status_json(ts, rs, DDIR_READ, root);
1226 add_ddir_status_json(ts, rs, DDIR_WRITE, root);
1227 add_ddir_status_json(ts, rs, DDIR_TRIM, root);
1228
1229 /* CPU Usage */
1230 if (ts->total_run_time) {
1231 double runt = (double) ts->total_run_time;
1232
1233 usr_cpu = (double) ts->usr_time * 100 / runt;
1234 sys_cpu = (double) ts->sys_time * 100 / runt;
1235 } else {
1236 usr_cpu = 0;
1237 sys_cpu = 0;
1238 }
1239 json_object_add_value_float(root, "usr_cpu", usr_cpu);
1240 json_object_add_value_float(root, "sys_cpu", sys_cpu);
1241 json_object_add_value_int(root, "ctx", ts->ctx);
1242 json_object_add_value_int(root, "majf", ts->majf);
1243 json_object_add_value_int(root, "minf", ts->minf);
1244
1245
1246 /* Calc % distribution of IO depths, usecond, msecond latency */
1247 stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
1248 stat_calc_lat_n(ts, io_u_lat_n);
1249 stat_calc_lat_u(ts, io_u_lat_u);
1250 stat_calc_lat_m(ts, io_u_lat_m);
1251
1252 tmp = json_create_object();
1253 json_object_add_value_object(root, "iodepth_level", tmp);
1254 /* Only show fixed 7 I/O depth levels*/
1255 for (i = 0; i < 7; i++) {
1256 char name[20];
1257 if (i < 6)
1258 snprintf(name, 20, "%d", 1 << i);
1259 else
1260 snprintf(name, 20, ">=%d", 1 << i);
1261 json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
1262 }
1263
1264 /* Nanosecond latency */
1265 tmp = json_create_object();
1266 json_object_add_value_object(root, "latency_ns", tmp);
1267 for (i = 0; i < FIO_IO_U_LAT_N_NR; i++) {
1268 const char *ranges[] = { "2", "4", "10", "20", "50", "100",
1269 "250", "500", "750", "1000", };
1270 json_object_add_value_float(tmp, ranges[i], io_u_lat_n[i]);
1271 }
1272 /* Microsecond latency */
1273 tmp = json_create_object();
1274 json_object_add_value_object(root, "latency_us", tmp);
1275 for (i = 0; i < FIO_IO_U_LAT_U_NR; i++) {
1276 const char *ranges[] = { "2", "4", "10", "20", "50", "100",
1277 "250", "500", "750", "1000", };
1278 json_object_add_value_float(tmp, ranges[i], io_u_lat_u[i]);
1279 }
1280 /* Millisecond latency */
1281 tmp = json_create_object();
1282 json_object_add_value_object(root, "latency_ms", tmp);
1283 for (i = 0; i < FIO_IO_U_LAT_M_NR; i++) {
1284 const char *ranges[] = { "2", "4", "10", "20", "50", "100",
1285 "250", "500", "750", "1000", "2000",
1286 ">=2000", };
1287 json_object_add_value_float(tmp, ranges[i], io_u_lat_m[i]);
1288 }
1289
1290 /* Additional output if continue_on_error set - default off*/
1291 if (ts->continue_on_error) {
1292 json_object_add_value_int(root, "total_err", ts->total_err_count);
1293 json_object_add_value_int(root, "first_error", ts->first_error);
1294 }
1295
1296 if (ts->latency_depth) {
1297 json_object_add_value_int(root, "latency_depth", ts->latency_depth);
1298 json_object_add_value_int(root, "latency_target", ts->latency_target);
1299 json_object_add_value_float(root, "latency_percentile", ts->latency_percentile.u.f);
1300 json_object_add_value_int(root, "latency_window", ts->latency_window);
1301 }
1302
1303 /* Additional output if description is set */
1304 if (strlen(ts->description))
1305 json_object_add_value_string(root, "desc", ts->description);
1306
1307 if (ts->nr_block_infos) {
1308 /* Block error histogram and types */
1309 int len;
1310 unsigned int *percentiles = NULL;
1311 unsigned int block_state_counts[BLOCK_STATE_COUNT];
1312
1313 len = calc_block_percentiles(ts->nr_block_infos, ts->block_infos,
1314 ts->percentile_list,
1315 &percentiles, block_state_counts);
1316
1317 if (len) {
1318 struct json_object *block, *percentile_object, *states;
1319 int state;
1320 block = json_create_object();
1321 json_object_add_value_object(root, "block", block);
1322
1323 percentile_object = json_create_object();
1324 json_object_add_value_object(block, "percentiles",
1325 percentile_object);
1326 for (i = 0; i < len; i++) {
1327 char buf[20];
1328 snprintf(buf, sizeof(buf), "%f",
1329 ts->percentile_list[i].u.f);
1330 json_object_add_value_int(percentile_object,
1331 (const char *)buf,
1332 percentiles[i]);
1333 }
1334
1335 states = json_create_object();
1336 json_object_add_value_object(block, "states", states);
1337 for (state = 0; state < BLOCK_STATE_COUNT; state++) {
1338 json_object_add_value_int(states,
1339 block_state_names[state],
1340 block_state_counts[state]);
1341 }
1342 free(percentiles);
1343 }
1344 }
1345
1346 if (ts->ss_dur) {
1347 struct json_object *data;
1348 struct json_array *iops, *bw;
1349 int i, j, k;
1350 char ss_buf[64];
1351
1352 snprintf(ss_buf, sizeof(ss_buf), "%s%s:%f%s",
1353 ts->ss_state & __FIO_SS_IOPS ? "iops" : "bw",
1354 ts->ss_state & __FIO_SS_SLOPE ? "_slope" : "",
1355 (float) ts->ss_limit.u.f,
1356 ts->ss_state & __FIO_SS_PCT ? "%" : "");
1357
1358 tmp = json_create_object();
1359 json_object_add_value_object(root, "steadystate", tmp);
1360 json_object_add_value_string(tmp, "ss", ss_buf);
1361 json_object_add_value_int(tmp, "duration", (int)ts->ss_dur);
1362 json_object_add_value_int(tmp, "attained", (ts->ss_state & __FIO_SS_ATTAINED) > 0);
1363
1364 snprintf(ss_buf, sizeof(ss_buf), "%f%s", (float) ts->ss_criterion.u.f,
1365 ts->ss_state & __FIO_SS_PCT ? "%" : "");
1366 json_object_add_value_string(tmp, "criterion", ss_buf);
1367 json_object_add_value_float(tmp, "max_deviation", ts->ss_deviation.u.f);
1368 json_object_add_value_float(tmp, "slope", ts->ss_slope.u.f);
1369
1370 data = json_create_object();
1371 json_object_add_value_object(tmp, "data", data);
1372 bw = json_create_array();
1373 iops = json_create_array();
1374
1375 /*
1376 ** if ss was attained or the buffer is not full,
1377 ** ss->head points to the first element in the list.
1378 ** otherwise it actually points to the second element
1379 ** in the list
1380 */
1381 if ((ts->ss_state & __FIO_SS_ATTAINED) || !(ts->ss_state & __FIO_SS_BUFFER_FULL))
1382 j = ts->ss_head;
1383 else
1384 j = ts->ss_head == 0 ? ts->ss_dur - 1 : ts->ss_head - 1;
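		/*
		 * Editor's note (illustration only): the loop below walks the
		 * ring buffer in chronological order starting at j. E.g. with
		 * ss_dur == 4 and j == 3 it visits k = 3, 0, 1, 2.
		 */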
1385 for (i = 0; i < ts->ss_dur; i++) {
1386 k = (j + i) % ts->ss_dur;
1387 json_array_add_value_int(bw, ts->ss_bw_data[k]);
1388 json_array_add_value_int(iops, ts->ss_iops_data[k]);
1389 }
1390 json_object_add_value_int(data, "bw_mean", steadystate_bw_mean(ts));
1391 json_object_add_value_int(data, "iops_mean", steadystate_iops_mean(ts));
1392 json_object_add_value_array(data, "iops", iops);
1393 json_object_add_value_array(data, "bw", bw);
1394 }
1395
1396 return root;
1397}
1398
1399static void show_thread_status_terse(struct thread_stat *ts,
1400 struct group_run_stats *rs,
1401 struct buf_output *out)
1402{
1403 if (terse_version >= 2 && terse_version <= 5)
1404 show_thread_status_terse_all(ts, rs, terse_version, out);
1405 else
1406 log_err("fio: bad terse version!? %d\n", terse_version);
1407}
1408
1409struct json_object *show_thread_status(struct thread_stat *ts,
1410 struct group_run_stats *rs,
1411 struct flist_head *opt_list,
1412 struct buf_output *out)
1413{
1414 struct json_object *ret = NULL;
1415
1416 if (output_format & FIO_OUTPUT_TERSE)
1417 show_thread_status_terse(ts, rs, out);
1418 if (output_format & FIO_OUTPUT_JSON)
1419 ret = show_thread_status_json(ts, rs, opt_list);
1420 if (output_format & FIO_OUTPUT_NORMAL)
1421 show_thread_status_normal(ts, rs, out);
1422
1423 return ret;
1424}
1425
1426static void sum_stat(struct io_stat *dst, struct io_stat *src, bool first)
1427{
1428 double mean, S;
1429
1430 if (src->samples == 0)
1431 return;
1432
1433 dst->min_val = min(dst->min_val, src->min_val);
1434 dst->max_val = max(dst->max_val, src->max_val);
1435
1436 /*
1437 * Compute new mean and S after the merge
1438 * <http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
1439 * #Parallel_algorithm>
1440 */
1441 if (first) {
1442 mean = src->mean.u.f;
1443 S = src->S.u.f;
1444 } else {
1445 double delta = src->mean.u.f - dst->mean.u.f;
1446
1447 mean = ((src->mean.u.f * src->samples) +
1448 (dst->mean.u.f * dst->samples)) /
1449 (dst->samples + src->samples);
1450
1451 S = src->S.u.f + dst->S.u.f + pow(delta, 2.0) *
1452 (dst->samples * src->samples) /
1453 (dst->samples + src->samples);
1454 }
1455
1456 dst->samples += src->samples;
1457 dst->mean.u.f = mean;
1458 dst->S.u.f = S;
1459}
1460
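/*
 * Editor's note (illustration only): sum_stat() above uses the standard
 * parallel/pairwise combination of running statistics. For two partial
 * runs a and b with sample counts n_a, n_b, means m_a, m_b and sums of
 * squared deviations S_a, S_b:
 *
 *     mean = (n_a * m_a + n_b * m_b) / (n_a + n_b)
 *     S    = S_a + S_b + delta^2 * n_a * n_b / (n_a + n_b),  delta = m_b - m_a
 *
 * so calc_lat() can later report sqrt(S / (n - 1)) for the merged set just
 * as it would for a single run.
 */
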
1461void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src)
1462{
1463 int i;
1464
1465 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1466 if (dst->max_run[i] < src->max_run[i])
1467 dst->max_run[i] = src->max_run[i];
1468 if (dst->min_run[i] && dst->min_run[i] > src->min_run[i])
1469 dst->min_run[i] = src->min_run[i];
1470 if (dst->max_bw[i] < src->max_bw[i])
1471 dst->max_bw[i] = src->max_bw[i];
1472 if (dst->min_bw[i] && dst->min_bw[i] > src->min_bw[i])
1473 dst->min_bw[i] = src->min_bw[i];
1474
1475 dst->iobytes[i] += src->iobytes[i];
1476 dst->agg[i] += src->agg[i];
1477 }
1478
1479 if (!dst->kb_base)
1480 dst->kb_base = src->kb_base;
1481 if (!dst->unit_base)
1482 dst->unit_base = src->unit_base;
1483}
1484
1485void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src,
1486 bool first)
1487{
1488 int l, k;
1489
1490 for (l = 0; l < DDIR_RWDIR_CNT; l++) {
1491 if (!dst->unified_rw_rep) {
1492 sum_stat(&dst->clat_stat[l], &src->clat_stat[l], first);
1493 sum_stat(&dst->slat_stat[l], &src->slat_stat[l], first);
1494 sum_stat(&dst->lat_stat[l], &src->lat_stat[l], first);
1495 sum_stat(&dst->bw_stat[l], &src->bw_stat[l], first);
1496 sum_stat(&dst->iops_stat[l], &src->iops_stat[l], first);
1497
1498 dst->io_bytes[l] += src->io_bytes[l];
1499
1500 if (dst->runtime[l] < src->runtime[l])
1501 dst->runtime[l] = src->runtime[l];
1502 } else {
1503 sum_stat(&dst->clat_stat[0], &src->clat_stat[l], first);
1504 sum_stat(&dst->slat_stat[0], &src->slat_stat[l], first);
1505 sum_stat(&dst->lat_stat[0], &src->lat_stat[l], first);
1506 sum_stat(&dst->bw_stat[0], &src->bw_stat[l], first);
1507 sum_stat(&dst->iops_stat[0], &src->iops_stat[l], first);
1508
1509 dst->io_bytes[0] += src->io_bytes[l];
1510
1511 if (dst->runtime[0] < src->runtime[l])
1512 dst->runtime[0] = src->runtime[l];
1513
1514 /*
1515 * We're summing to the same destination, so override
1516 * 'first' after the first iteration of the loop
1517 */
1518 first = false;
1519 }
1520 }
1521
1522 dst->usr_time += src->usr_time;
1523 dst->sys_time += src->sys_time;
1524 dst->ctx += src->ctx;
1525 dst->majf += src->majf;
1526 dst->minf += src->minf;
1527
1528 for (k = 0; k < FIO_IO_U_MAP_NR; k++)
1529 dst->io_u_map[k] += src->io_u_map[k];
1530 for (k = 0; k < FIO_IO_U_MAP_NR; k++)
1531 dst->io_u_submit[k] += src->io_u_submit[k];
1532 for (k = 0; k < FIO_IO_U_MAP_NR; k++)
1533 dst->io_u_complete[k] += src->io_u_complete[k];
1534 for (k = 0; k < FIO_IO_U_LAT_N_NR; k++)
1535 dst->io_u_lat_n[k] += src->io_u_lat_n[k];
1536 for (k = 0; k < FIO_IO_U_LAT_U_NR; k++)
1537 dst->io_u_lat_u[k] += src->io_u_lat_u[k];
1538 for (k = 0; k < FIO_IO_U_LAT_M_NR; k++)
1539 dst->io_u_lat_m[k] += src->io_u_lat_m[k];
1540
1541 for (k = 0; k < DDIR_RWDIR_CNT; k++) {
1542 if (!dst->unified_rw_rep) {
1543 dst->total_io_u[k] += src->total_io_u[k];
1544 dst->short_io_u[k] += src->short_io_u[k];
1545 dst->drop_io_u[k] += src->drop_io_u[k];
1546 } else {
1547 dst->total_io_u[0] += src->total_io_u[k];
1548 dst->short_io_u[0] += src->short_io_u[k];
1549 dst->drop_io_u[0] += src->drop_io_u[k];
1550 }
1551 }
1552
1553 for (k = 0; k < DDIR_RWDIR_CNT; k++) {
1554 int m;
1555
1556 for (m = 0; m < FIO_IO_U_PLAT_NR; m++) {
1557 if (!dst->unified_rw_rep)
1558 dst->io_u_plat[k][m] += src->io_u_plat[k][m];
1559 else
1560 dst->io_u_plat[0][m] += src->io_u_plat[k][m];
1561 }
1562 }
1563
1564 dst->total_run_time += src->total_run_time;
1565 dst->total_submit += src->total_submit;
1566 dst->total_complete += src->total_complete;
1567}
1568
1569void init_group_run_stat(struct group_run_stats *gs)
1570{
1571 int i;
1572 memset(gs, 0, sizeof(*gs));
1573
1574 for (i = 0; i < DDIR_RWDIR_CNT; i++)
1575 gs->min_bw[i] = gs->min_run[i] = ~0UL;
1576}
1577
1578void init_thread_stat(struct thread_stat *ts)
1579{
1580 int j;
1581
1582 memset(ts, 0, sizeof(*ts));
1583
1584 for (j = 0; j < DDIR_RWDIR_CNT; j++) {
1585 ts->lat_stat[j].min_val = -1UL;
1586 ts->clat_stat[j].min_val = -1UL;
1587 ts->slat_stat[j].min_val = -1UL;
1588 ts->bw_stat[j].min_val = -1UL;
1589 ts->iops_stat[j].min_val = -1UL;
1590 }
1591 ts->groupid = -1;
1592}
1593
1594void __show_run_stats(void)
1595{
1596 struct group_run_stats *runstats, *rs;
1597 struct thread_data *td;
1598 struct thread_stat *threadstats, *ts;
1599 int i, j, k, nr_ts, last_ts, idx;
1600 bool kb_base_warned = false;
1601 bool unit_base_warned = false;
1602 struct json_object *root = NULL;
1603 struct json_array *array = NULL;
1604 struct buf_output output[FIO_OUTPUT_NR];
1605 struct flist_head **opt_lists;
1606
1607 runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));
1608
1609 for (i = 0; i < groupid + 1; i++)
1610 init_group_run_stat(&runstats[i]);
1611
1612 /*
 1613 * Find out how many thread stats we need. If group reporting isn't
 1614 * enabled, it's one per td.
1615 */
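	/*
	 * Editor's note (illustration only): e.g. 4 jobs in one group with
	 * group_reporting=1 collapse into a single thread_stat (nr_ts == 1);
	 * the same 4 jobs without group_reporting yield nr_ts == 4.
	 */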
1616 nr_ts = 0;
1617 last_ts = -1;
1618 for_each_td(td, i) {
1619 if (!td->o.group_reporting) {
1620 nr_ts++;
1621 continue;
1622 }
1623 if (last_ts == td->groupid)
1624 continue;
1625 if (!td->o.stats)
1626 continue;
1627
1628 last_ts = td->groupid;
1629 nr_ts++;
1630 }
1631
1632 threadstats = malloc(nr_ts * sizeof(struct thread_stat));
1633 opt_lists = malloc(nr_ts * sizeof(struct flist_head *));
1634
1635 for (i = 0; i < nr_ts; i++) {
1636 init_thread_stat(&threadstats[i]);
1637 opt_lists[i] = NULL;
1638 }
1639
1640 j = 0;
1641 last_ts = -1;
1642 idx = 0;
1643 for_each_td(td, i) {
1644 if (!td->o.stats)
1645 continue;
1646 if (idx && (!td->o.group_reporting ||
1647 (td->o.group_reporting && last_ts != td->groupid))) {
1648 idx = 0;
1649 j++;
1650 }
1651
1652 last_ts = td->groupid;
1653
1654 ts = &threadstats[j];
1655
1656 ts->clat_percentiles = td->o.clat_percentiles;
1657 ts->lat_percentiles = td->o.lat_percentiles;
1658 ts->percentile_precision = td->o.percentile_precision;
1659 memcpy(ts->percentile_list, td->o.percentile_list, sizeof(td->o.percentile_list));
1660 opt_lists[j] = &td->opt_list;
1661
1662 idx++;
1663 ts->members++;
1664
1665 if (ts->groupid == -1) {
1666 /*
1667 * These are per-group shared already
1668 */
1669 strncpy(ts->name, td->o.name, FIO_JOBNAME_SIZE - 1);
1670 if (td->o.description)
1671 strncpy(ts->description, td->o.description,
1672 FIO_JOBDESC_SIZE - 1);
1673 else
1674 memset(ts->description, 0, FIO_JOBDESC_SIZE);
1675
1676 /*
1677 * If multiple entries in this group, this is
1678 * the first member.
1679 */
1680 ts->thread_number = td->thread_number;
1681 ts->groupid = td->groupid;
1682
1683 /*
1684 * first pid in group, not very useful...
1685 */
1686 ts->pid = td->pid;
1687
1688 ts->kb_base = td->o.kb_base;
1689 ts->unit_base = td->o.unit_base;
1690 ts->unified_rw_rep = td->o.unified_rw_rep;
1691 } else if (ts->kb_base != td->o.kb_base && !kb_base_warned) {
1692 log_info("fio: kb_base differs for jobs in group, using"
1693 " %u as the base\n", ts->kb_base);
1694 kb_base_warned = true;
1695 } else if (ts->unit_base != td->o.unit_base && !unit_base_warned) {
1696 log_info("fio: unit_base differs for jobs in group, using"
1697 " %u as the base\n", ts->unit_base);
1698 unit_base_warned = true;
1699 }
1700
1701 ts->continue_on_error = td->o.continue_on_error;
1702 ts->total_err_count += td->total_err_count;
1703 ts->first_error = td->first_error;
1704 if (!ts->error) {
1705 if (!td->error && td->o.continue_on_error &&
1706 td->first_error) {
1707 ts->error = td->first_error;
1708 ts->verror[sizeof(ts->verror) - 1] = '\0';
1709 strncpy(ts->verror, td->verror, sizeof(ts->verror) - 1);
1710 } else if (td->error) {
1711 ts->error = td->error;
1712 ts->verror[sizeof(ts->verror) - 1] = '\0';
1713 strncpy(ts->verror, td->verror, sizeof(ts->verror) - 1);
1714 }
1715 }
1716
1717 ts->latency_depth = td->latency_qd;
1718 ts->latency_target = td->o.latency_target;
1719 ts->latency_percentile = td->o.latency_percentile;
1720 ts->latency_window = td->o.latency_window;
1721
1722 ts->nr_block_infos = td->ts.nr_block_infos;
1723 for (k = 0; k < ts->nr_block_infos; k++)
1724 ts->block_infos[k] = td->ts.block_infos[k];
1725
1726 sum_thread_stats(ts, &td->ts, idx == 1);
1727
1728 if (td->o.ss_dur) {
1729 ts->ss_state = td->ss.state;
1730 ts->ss_dur = td->ss.dur;
1731 ts->ss_head = td->ss.head;
1732 ts->ss_bw_data = td->ss.bw_data;
1733 ts->ss_iops_data = td->ss.iops_data;
1734 ts->ss_limit.u.f = td->ss.limit;
1735 ts->ss_slope.u.f = td->ss.slope;
1736 ts->ss_deviation.u.f = td->ss.deviation;
1737 ts->ss_criterion.u.f = td->ss.criterion;
1738 }
1739 else
1740 ts->ss_dur = ts->ss_state = 0;
1741 }
1742
1743 for (i = 0; i < nr_ts; i++) {
1744 unsigned long long bw;
1745
1746 ts = &threadstats[i];
1747 if (ts->groupid == -1)
1748 continue;
1749 rs = &runstats[ts->groupid];
1750 rs->kb_base = ts->kb_base;
1751 rs->unit_base = ts->unit_base;
1752 rs->unified_rw_rep += ts->unified_rw_rep;
1753
1754 for (j = 0; j < DDIR_RWDIR_CNT; j++) {
1755 if (!ts->runtime[j])
1756 continue;
1757 if (ts->runtime[j] < rs->min_run[j] || !rs->min_run[j])
1758 rs->min_run[j] = ts->runtime[j];
1759 if (ts->runtime[j] > rs->max_run[j])
1760 rs->max_run[j] = ts->runtime[j];
1761
1762 bw = 0;
1763 if (ts->runtime[j])
1764 bw = ts->io_bytes[j] * 1000 / ts->runtime[j];
1765 if (bw < rs->min_bw[j])
1766 rs->min_bw[j] = bw;
1767 if (bw > rs->max_bw[j])
1768 rs->max_bw[j] = bw;
1769
1770 rs->iobytes[j] += ts->io_bytes[j];
1771 }
1772 }
1773
1774 for (i = 0; i < groupid + 1; i++) {
1775 int ddir;
1776
1777 rs = &runstats[i];
1778
1779 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
1780 if (rs->max_run[ddir])
1781 rs->agg[ddir] = (rs->iobytes[ddir] * 1000) /
1782 rs->max_run[ddir];
1783 }
1784 }
1785
1786 for (i = 0; i < FIO_OUTPUT_NR; i++)
1787 buf_output_init(&output[i]);
1788
1789 /*
1790 * don't overwrite last signal output
1791 */
1792 if (output_format & FIO_OUTPUT_NORMAL)
1793 log_buf(&output[__FIO_OUTPUT_NORMAL], "\n");
1794 if (output_format & FIO_OUTPUT_JSON) {
1795 struct thread_data *global;
1796 char time_buf[32];
1797 struct timeval now;
1798 unsigned long long ms_since_epoch;
1799
1800 gettimeofday(&now, NULL);
1801 ms_since_epoch = (unsigned long long)(now.tv_sec) * 1000 +
1802 (unsigned long long)(now.tv_usec) / 1000;
1803
1804 os_ctime_r((const time_t *) &now.tv_sec, time_buf,
1805 sizeof(time_buf));
1806 if (time_buf[strlen(time_buf) - 1] == '\n')
1807 time_buf[strlen(time_buf) - 1] = '\0';
1808
1809 root = json_create_object();
1810 json_object_add_value_string(root, "fio version", fio_version_string);
1811 json_object_add_value_int(root, "timestamp", now.tv_sec);
1812 json_object_add_value_int(root, "timestamp_ms", ms_since_epoch);
1813 json_object_add_value_string(root, "time", time_buf);
1814 global = get_global_options();
1815 json_add_job_opts(root, "global options", &global->opt_list, false);
1816 array = json_create_array();
1817 json_object_add_value_array(root, "jobs", array);
1818 }
1819
1820 if (is_backend)
1821 fio_server_send_job_options(&get_global_options()->opt_list, -1U);
1822
1823 for (i = 0; i < nr_ts; i++) {
1824 ts = &threadstats[i];
1825 rs = &runstats[ts->groupid];
1826
1827 if (is_backend) {
1828 fio_server_send_job_options(opt_lists[i], i);
1829 fio_server_send_ts(ts, rs);
1830 } else {
1831 if (output_format & FIO_OUTPUT_TERSE)
1832 show_thread_status_terse(ts, rs, &output[__FIO_OUTPUT_TERSE]);
1833 if (output_format & FIO_OUTPUT_JSON) {
1834 struct json_object *tmp = show_thread_status_json(ts, rs, opt_lists[i]);
1835 json_array_add_value_object(array, tmp);
1836 }
1837 if (output_format & FIO_OUTPUT_NORMAL)
1838 show_thread_status_normal(ts, rs, &output[__FIO_OUTPUT_NORMAL]);
1839 }
1840 }
1841 if (!is_backend && (output_format & FIO_OUTPUT_JSON)) {
1842 /* disk util stats, if any */
1843 show_disk_util(1, root, &output[__FIO_OUTPUT_JSON]);
1844
1845 show_idle_prof_stats(FIO_OUTPUT_JSON, root, &output[__FIO_OUTPUT_JSON]);
1846
1847 json_print_object(root, &output[__FIO_OUTPUT_JSON]);
1848 log_buf(&output[__FIO_OUTPUT_JSON], "\n");
1849 json_free_object(root);
1850 }
1851
1852 for (i = 0; i < groupid + 1; i++) {
1853 rs = &runstats[i];
1854
1855 rs->groupid = i;
1856 if (is_backend)
1857 fio_server_send_gs(rs);
1858 else if (output_format & FIO_OUTPUT_NORMAL)
1859 show_group_stats(rs, &output[__FIO_OUTPUT_NORMAL]);
1860 }
1861
1862 if (is_backend)
1863 fio_server_send_du();
1864 else if (output_format & FIO_OUTPUT_NORMAL) {
1865 show_disk_util(0, NULL, &output[__FIO_OUTPUT_NORMAL]);
1866 show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL, &output[__FIO_OUTPUT_NORMAL]);
1867 }
1868
1869 for (i = 0; i < FIO_OUTPUT_NR; i++) {
1870 struct buf_output *out = &output[i];
1871
1872 log_info_buf(out->buf, out->buflen);
1873 buf_output_free(out);
1874 }
1875
1876 log_info_flush();
1877 free(runstats);
1878 free(threadstats);
1879 free(opt_lists);
1880}
1881
1882void show_run_stats(void)
1883{
1884 fio_mutex_down(stat_mutex);
1885 __show_run_stats();
1886 fio_mutex_up(stat_mutex);
1887}
1888
1889void __show_running_run_stats(void)
1890{
1891 struct thread_data *td;
1892 unsigned long long *rt;
1893 struct timespec ts;
1894 int i;
1895
1896 fio_mutex_down(stat_mutex);
1897
1898 rt = malloc(thread_number * sizeof(unsigned long long));
1899 fio_gettime(&ts, NULL);
1900
1901 for_each_td(td, i) {
1902 td->update_rusage = 1;
1903 td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
1904 td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
1905 td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
1906 td->ts.total_run_time = mtime_since(&td->epoch, &ts);
1907
1908 rt[i] = mtime_since(&td->start, &ts);
1909 if (td_read(td) && td->ts.io_bytes[DDIR_READ])
1910 td->ts.runtime[DDIR_READ] += rt[i];
1911 if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
1912 td->ts.runtime[DDIR_WRITE] += rt[i];
1913 if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
1914 td->ts.runtime[DDIR_TRIM] += rt[i];
1915 }
1916
1917 for_each_td(td, i) {
1918 if (td->runstate >= TD_EXITED)
1919 continue;
1920 if (td->rusage_sem) {
1921 td->update_rusage = 1;
1922 fio_mutex_down(td->rusage_sem);
1923 }
1924 td->update_rusage = 0;
1925 }
1926
1927 __show_run_stats();
1928
1929 for_each_td(td, i) {
1930 if (td_read(td) && td->ts.io_bytes[DDIR_READ])
1931 td->ts.runtime[DDIR_READ] -= rt[i];
1932 if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
1933 td->ts.runtime[DDIR_WRITE] -= rt[i];
1934 if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
1935 td->ts.runtime[DDIR_TRIM] -= rt[i];
1936 }
1937
1938 free(rt);
1939 fio_mutex_up(stat_mutex);
1940}
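/*
 * Note on the rt[] add/subtract above: ts->runtime[] is normally only
 * bumped when a job finishes, so for an interim dump we temporarily fold
 * in the time each job has been running so far, print the stats, then
 * back it out again so the final accounting at job exit is unaffected.
 */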
1941
1942static bool status_interval_init;
1943static struct timespec status_time;
1944static bool status_file_disabled;
1945
1946#define FIO_STATUS_FILE "fio-dump-status"
1947
1948static int check_status_file(void)
1949{
1950 struct stat sb;
1951 const char *temp_dir;
1952 char fio_status_file_path[PATH_MAX];
1953
1954 if (status_file_disabled)
1955 return 0;
1956
1957 temp_dir = getenv("TMPDIR");
1958 if (temp_dir == NULL) {
1959 temp_dir = getenv("TEMP");
1960 if (temp_dir && strlen(temp_dir) >= PATH_MAX)
1961 temp_dir = NULL;
1962 }
1963 if (temp_dir == NULL)
1964 temp_dir = "/tmp";
1965
1966 snprintf(fio_status_file_path, sizeof(fio_status_file_path), "%s/%s", temp_dir, FIO_STATUS_FILE);
1967
1968 if (stat(fio_status_file_path, &sb))
1969 return 0;
1970
1971 if (unlink(fio_status_file_path) < 0) {
1972 log_err("fio: failed to unlink %s: %s\n", fio_status_file_path,
1973 strerror(errno));
1974 log_err("fio: disabling status file updates\n");
1975 status_file_disabled = true;
1976 }
1977
1978 return 1;
1979}
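/*
 * Usage note: while fio is running, creating the trigger file (e.g.
 * "$TMPDIR/fio-dump-status", with $TEMP and then /tmp as fallbacks per
 * the lookup above) makes the next poll here return 1, which the caller
 * turns into an interim stats dump; the file is unlinked again, so each
 * touch yields exactly one dump.
 */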
1980
1981void check_for_running_stats(void)
1982{
1983 if (status_interval) {
1984 if (!status_interval_init) {
1985 fio_gettime(&status_time, NULL);
1986 status_interval_init = true;
1987 } else if (mtime_since_now(&status_time) >= status_interval) {
1988 show_running_run_stats();
1989 fio_gettime(&status_time, NULL);
1990 return;
1991 }
1992 }
1993 if (check_status_file()) {
1994 show_running_run_stats();
1995 return;
1996 }
1997}
1998
1999static inline void add_stat_sample(struct io_stat *is, unsigned long long data)
2000{
2001 double val = data;
2002 double delta;
2003
2004 if (data > is->max_val)
2005 is->max_val = data;
2006 if (data < is->min_val)
2007 is->min_val = data;
2008
2009 delta = val - is->mean.u.f;
2010 if (delta) {
2011 is->mean.u.f += delta / (is->samples + 1.0);
2012 is->S.u.f += delta * (val - is->mean.u.f);
2013 }
2014
2015 is->samples++;
2016}
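/*
 * The update above is the standard online (Welford) recurrence: the mean
 * is refined incrementally and S accumulates the sum of squared
 * deviations, so the sample variance can later be recovered as
 * S / (samples - 1) without keeping the individual values around. A
 * minimal sketch of pulling summary numbers back out of a populated
 * io_stat (illustrative only, not a helper that exists in this file):
 *
 *	double mean = is->mean.u.f;
 *	double var = is->samples > 1 ? is->S.u.f / (is->samples - 1.0) : 0.0;
 *	double stddev = sqrt(var);
 */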
2017
2018/*
2019 * Return a struct io_logs, which is added to the tail of the log
2020 * list for 'iolog'.
2021 */
2022static struct io_logs *get_new_log(struct io_log *iolog)
2023{
2024 size_t new_size, new_samples;
2025 struct io_logs *cur_log;
2026
2027 /*
2028 * Cap the size at MAX_LOG_ENTRIES, so we don't keep doubling
2029 * forever
2030 */
2031 if (!iolog->cur_log_max)
2032 new_samples = DEF_LOG_ENTRIES;
2033 else {
2034 new_samples = iolog->cur_log_max * 2;
2035 if (new_samples > MAX_LOG_ENTRIES)
2036 new_samples = MAX_LOG_ENTRIES;
2037 }
2038
2039 new_size = new_samples * log_entry_sz(iolog);
2040
2041 cur_log = smalloc(sizeof(*cur_log));
2042 if (cur_log) {
2043 INIT_FLIST_HEAD(&cur_log->list);
2044 cur_log->log = malloc(new_size);
2045 if (cur_log->log) {
2046 cur_log->nr_samples = 0;
2047 cur_log->max_samples = new_samples;
2048 flist_add_tail(&cur_log->list, &iolog->io_logs);
2049 iolog->cur_log_max = new_samples;
2050 return cur_log;
2051 }
2052 sfree(cur_log);
2053 }
2054
2055 return NULL;
2056}
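/*
 * Growth policy illustration: the first chunk holds DEF_LOG_ENTRIES
 * samples and each later chunk doubles the previous capacity until the
 * MAX_LOG_ENTRIES cap is reached, after which every new chunk stays at
 * the cap. Assuming a DEF_LOG_ENTRIES of 1024, per-chunk capacities
 * would run 1024, 2048, 4096, ... up to the cap.
 */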
2057
2058/*
2059 * Add and return a new log chunk, or return current log if big enough
2060 */
2061static struct io_logs *regrow_log(struct io_log *iolog)
2062{
2063 struct io_logs *cur_log;
2064 int i;
2065
2066 if (!iolog || iolog->disabled)
2067 goto disable;
2068
2069 cur_log = iolog_cur_log(iolog);
2070 if (!cur_log) {
2071 cur_log = get_new_log(iolog);
2072 if (!cur_log)
2073 return NULL;
2074 }
2075
2076 if (cur_log->nr_samples < cur_log->max_samples)
2077 return cur_log;
2078
2079 /*
2080 * No room for a new sample. If we're compressing on the fly, flush
2081 * out the current chunk
2082 */
2083 if (iolog->log_gz) {
2084 if (iolog_cur_flush(iolog, cur_log)) {
2085 log_err("fio: failed flushing iolog! Will stop logging.\n");
2086 return NULL;
2087 }
2088 }
2089
2090 /*
2091 * Get a new log array, and add to our list
2092 */
2093 cur_log = get_new_log(iolog);
2094 if (!cur_log) {
2095 log_err("fio: failed extending iolog! Will stop logging.\n");
2096 return NULL;
2097 }
2098
2099 if (!iolog->pending || !iolog->pending->nr_samples)
2100 return cur_log;
2101
2102 /*
2103 * Flush pending items to new log
2104 */
2105 for (i = 0; i < iolog->pending->nr_samples; i++) {
2106 struct io_sample *src, *dst;
2107
2108 src = get_sample(iolog, iolog->pending, i);
2109 dst = get_sample(iolog, cur_log, i);
2110 memcpy(dst, src, log_entry_sz(iolog));
2111 }
2112 cur_log->nr_samples = iolog->pending->nr_samples;
2113
2114 iolog->pending->nr_samples = 0;
2115 return cur_log;
2116disable:
2117 if (iolog)
2118 iolog->disabled = true;
2119 return NULL;
2120}
2121
2122void regrow_logs(struct thread_data *td)
2123{
2124 regrow_log(td->slat_log);
2125 regrow_log(td->clat_log);
2126 regrow_log(td->clat_hist_log);
2127 regrow_log(td->lat_log);
2128 regrow_log(td->bw_log);
2129 regrow_log(td->iops_log);
2130 td->flags &= ~TD_F_REGROW_LOGS;
2131}
2132
2133static struct io_logs *get_cur_log(struct io_log *iolog)
2134{
2135 struct io_logs *cur_log;
2136
2137 cur_log = iolog_cur_log(iolog);
2138 if (!cur_log) {
2139 cur_log = get_new_log(iolog);
2140 if (!cur_log)
2141 return NULL;
2142 }
2143
2144 if (cur_log->nr_samples < cur_log->max_samples)
2145 return cur_log;
2146
2147 /*
2148 * Out of space. If we're in IO offload mode, or we're not doing
2149 * per unit logging (hence logging happens outside of the IO thread
2150 * as well), add a new log chunk inline. If we're doing inline
2151 * submissions, flag 'td' as needing a log regrow and we'll take
2152 * care of it on the submission side.
2153 */
2154 if (iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD ||
2155 !per_unit_log(iolog))
2156 return regrow_log(iolog);
2157
2158 iolog->td->flags |= TD_F_REGROW_LOGS;
2159 assert(iolog->pending->nr_samples < iolog->pending->max_samples);
2160 return iolog->pending;
2161}
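/*
 * The TD_F_REGROW_LOGS handshake: when the current chunk is full and we
 * are logging per IO unit from the fast path, new samples are parked in
 * iolog->pending and the flag is raised; regrow_logs(), run from the
 * submission side once the flag is seen, has regrow_log() allocate a
 * fresh chunk and copy the pending samples into it, keeping memory
 * allocation out of the per-IO completion path.
 */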
2162
2163static void __add_log_sample(struct io_log *iolog, union io_sample_data data,
2164 enum fio_ddir ddir, unsigned int bs,
2165 unsigned long t, uint64_t offset)
2166{
2167 struct io_logs *cur_log;
2168
2169 if (iolog->disabled)
2170 return;
2171 if (flist_empty(&iolog->io_logs))
2172 iolog->avg_last[ddir] = t;
2173
2174 cur_log = get_cur_log(iolog);
2175 if (cur_log) {
2176 struct io_sample *s;
2177
2178 s = get_sample(iolog, cur_log, cur_log->nr_samples);
2179
2180 s->data = data;
2181 s->time = t + (iolog->td ? iolog->td->unix_epoch : 0);
2182 io_sample_set_ddir(iolog, s, ddir);
2183 s->bs = bs;
2184
2185 if (iolog->log_offset) {
2186 struct io_sample_offset *so = (void *) s;
2187
2188 so->offset = offset;
2189 }
2190
2191 cur_log->nr_samples++;
2192 return;
2193 }
2194
2195 iolog->disabled = true;
2196}
2197
2198static inline void reset_io_stat(struct io_stat *ios)
2199{
2200 ios->max_val = ios->min_val = ios->samples = 0;
2201 ios->mean.u.f = ios->S.u.f = 0;
2202}
2203
2204void reset_io_stats(struct thread_data *td)
2205{
2206 struct thread_stat *ts = &td->ts;
2207 int i, j;
2208
2209 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
2210 reset_io_stat(&ts->clat_stat[i]);
2211 reset_io_stat(&ts->slat_stat[i]);
2212 reset_io_stat(&ts->lat_stat[i]);
2213 reset_io_stat(&ts->bw_stat[i]);
2214 reset_io_stat(&ts->iops_stat[i]);
2215
2216 ts->io_bytes[i] = 0;
2217 ts->runtime[i] = 0;
2218 ts->total_io_u[i] = 0;
2219 ts->short_io_u[i] = 0;
2220 ts->drop_io_u[i] = 0;
2221
2222 for (j = 0; j < FIO_IO_U_PLAT_NR; j++)
2223 ts->io_u_plat[i][j] = 0;
2224 }
2225
2226 for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
2227 ts->io_u_map[i] = 0;
2228 ts->io_u_submit[i] = 0;
2229 ts->io_u_complete[i] = 0;
2230 }
2231
2232 for (i = 0; i < FIO_IO_U_LAT_N_NR; i++)
2233 ts->io_u_lat_n[i] = 0;
2234 for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
2235 ts->io_u_lat_u[i] = 0;
2236 for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
2237 ts->io_u_lat_m[i] = 0;
2238
2239 ts->total_submit = 0;
2240 ts->total_complete = 0;
2241}
2242
2243static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir,
2244 unsigned long elapsed, bool log_max)
2245{
2246 /*
2247 * Note an entry in the log. Use the mean of the logged samples,
2248 * rounded to the nearest integer. Only write a log entry if we
2249 * actually collected samples.
2250 */
2251 if (iolog->avg_window[ddir].samples) {
2252 union io_sample_data data;
2253
2254 if (log_max)
2255 data.val = iolog->avg_window[ddir].max_val;
2256 else
2257 data.val = iolog->avg_window[ddir].mean.u.f + 0.50;
2258
2259 __add_log_sample(iolog, data, ddir, 0, elapsed, 0);
2260 }
2261
2262 reset_io_stat(&iolog->avg_window[ddir]);
2263}
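/*
 * Rounding example for the above: an averaged window mean of 1499.6 is
 * logged as 1500 and 1499.4 as 1499; adding 0.50 before the implicit
 * truncation to an integer gives round-to-nearest.
 */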
2264
2265static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed,
2266 bool log_max)
2267{
2268 int ddir;
2269
2270 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
2271 __add_stat_to_log(iolog, ddir, elapsed, log_max);
2272}
2273
2274static long add_log_sample(struct thread_data *td, struct io_log *iolog,
2275 union io_sample_data data, enum fio_ddir ddir,
2276 unsigned int bs, uint64_t offset)
2277{
2278 unsigned long elapsed, this_window;
2279
2280 if (!ddir_rw(ddir))
2281 return 0;
2282
2283 elapsed = mtime_since_now(&td->epoch);
2284
2285 /*
2286 * If no time averaging, just add the log sample.
2287 */
2288 if (!iolog->avg_msec) {
2289 __add_log_sample(iolog, data, ddir, bs, elapsed, offset);
2290 return 0;
2291 }
2292
2293 /*
2294 * Add the sample. If the time period has passed, then
2295 * add that entry to the log and clear.
2296 */
2297 add_stat_sample(&iolog->avg_window[ddir], data.val);
2298
2299 /*
2300 * If the period hasn't passed, adding the above sample is all we
2301 * need to do.
2302 */
2303 this_window = elapsed - iolog->avg_last[ddir];
2304 if (elapsed < iolog->avg_last[ddir])
2305 return iolog->avg_last[ddir] - elapsed;
2306 else if (this_window < iolog->avg_msec) {
2307 int diff = iolog->avg_msec - this_window;
2308
2309 if (inline_log(iolog) || diff > LOG_MSEC_SLACK)
2310 return diff;
2311 }
2312
2313 __add_stat_to_log(iolog, ddir, elapsed, td->o.log_max != 0);
2314
2315 iolog->avg_last[ddir] = elapsed - (this_window - iolog->avg_msec);
2316 return iolog->avg_msec;
2317}
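/*
 * Worked example of the windowed path above, assuming avg_msec = 500 and
 * the LOG_MSEC_SLACK of 10: a sample at elapsed = 480 with avg_last = 0
 * sees this_window = 480 and diff = 20 > 10, so the value is only folded
 * into the running window and the caller is told to come back in ~20
 * msec. A sample at elapsed = 505 closes the window: one averaged entry
 * is logged and avg_last becomes 505 - (505 - 500) = 500, so the next
 * window is measured from the nominal boundary rather than the slightly
 * late flush time.
 */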
2318
2319void finalize_logs(struct thread_data *td, bool unit_logs)
2320{
2321 unsigned long elapsed;
2322
2323 elapsed = mtime_since_now(&td->epoch);
2324
2325 if (td->clat_log && unit_logs)
2326 _add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0);
2327 if (td->slat_log && unit_logs)
2328 _add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0);
2329 if (td->lat_log && unit_logs)
2330 _add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0);
2331 if (td->bw_log && (unit_logs == per_unit_log(td->bw_log)))
2332 _add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0);
2333 if (td->iops_log && (unit_logs == per_unit_log(td->iops_log)))
2334 _add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0);
2335}
2336
2337void add_agg_sample(union io_sample_data data, enum fio_ddir ddir, unsigned int bs)
2338{
2339 struct io_log *iolog;
2340
2341 if (!ddir_rw(ddir))
2342 return;
2343
2344 iolog = agg_io_log[ddir];
2345 __add_log_sample(iolog, data, ddir, bs, mtime_since_genesis(), 0);
2346}
2347
2348static void add_clat_percentile_sample(struct thread_stat *ts,
2349 unsigned long long nsec, enum fio_ddir ddir)
2350{
2351 unsigned int idx = plat_val_to_idx(nsec);
2352 assert(idx < FIO_IO_U_PLAT_NR);
2353
2354 ts->io_u_plat[ddir][idx]++;
2355}
2356
2357void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
2358 unsigned long long nsec, unsigned int bs, uint64_t offset)
2359{
2360 unsigned long elapsed, this_window;
2361 struct thread_stat *ts = &td->ts;
2362 struct io_log *iolog = td->clat_hist_log;
2363
2364 td_io_u_lock(td);
2365
2366 add_stat_sample(&ts->clat_stat[ddir], nsec);
2367
2368 if (td->clat_log)
2369 add_log_sample(td, td->clat_log, sample_val(nsec), ddir, bs,
2370 offset);
2371
2372 if (ts->clat_percentiles)
2373 add_clat_percentile_sample(ts, nsec, ddir);
2374
2375 if (iolog && iolog->hist_msec) {
2376 struct io_hist *hw = &iolog->hist_window[ddir];
2377
2378 hw->samples++;
2379 elapsed = mtime_since_now(&td->epoch);
2380 if (!hw->hist_last)
2381 hw->hist_last = elapsed;
2382 this_window = elapsed - hw->hist_last;
2383
2384 if (this_window >= iolog->hist_msec) {
2385 unsigned int *io_u_plat;
2386 struct io_u_plat_entry *dst;
2387
2388 /*
2389 * Make a byte-for-byte copy of the latency histogram
2390 * stored in td->ts.io_u_plat[ddir], recording it in a
2391 * log sample. Note that the matching call to free() is
2392 * located in iolog.c after printing this sample to the
2393 * log file.
2394 */
2395 io_u_plat = (unsigned int *) td->ts.io_u_plat[ddir];
2396 dst = malloc(sizeof(struct io_u_plat_entry));
2397 memcpy(&(dst->io_u_plat), io_u_plat,
2398 FIO_IO_U_PLAT_NR * sizeof(unsigned int));
2399 flist_add(&dst->list, &hw->list);
2400 __add_log_sample(iolog, sample_plat(dst), ddir, bs,
2401 elapsed, offset);
2402
2403 /*
2404 * Update the last-recorded time to now, minus any
2405 * drift in time we encountered before actually
2406 * making the record.
2407 */
2408 hw->hist_last = elapsed - (this_window - iolog->hist_msec);
2409 hw->samples = 0;
2410 }
2411 }
2412
2413 td_io_u_unlock(td);
2414}
2415
2416void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
2417 unsigned long usec, unsigned int bs, uint64_t offset)
2418{
2419 struct thread_stat *ts = &td->ts;
2420
2421 if (!ddir_rw(ddir))
2422 return;
2423
2424 td_io_u_lock(td);
2425
2426 add_stat_sample(&ts->slat_stat[ddir], usec);
2427
2428 if (td->slat_log)
2429 add_log_sample(td, td->slat_log, sample_val(usec), ddir, bs, offset);
2430
2431 td_io_u_unlock(td);
2432}
2433
2434void add_lat_sample(struct thread_data *td, enum fio_ddir ddir,
2435 unsigned long long nsec, unsigned int bs, uint64_t offset)
2436{
2437 struct thread_stat *ts = &td->ts;
2438
2439 if (!ddir_rw(ddir))
2440 return;
2441
2442 td_io_u_lock(td);
2443
2444 add_stat_sample(&ts->lat_stat[ddir], nsec);
2445
2446 if (td->lat_log)
2447 add_log_sample(td, td->lat_log, sample_val(nsec), ddir, bs,
2448 offset);
2449
2450 if (ts->lat_percentiles)
2451 add_clat_percentile_sample(ts, nsec, ddir);
2452
2453 td_io_u_unlock(td);
2454}
2455
2456void add_bw_sample(struct thread_data *td, struct io_u *io_u,
2457 unsigned int bytes, unsigned long long spent)
2458{
2459 struct thread_stat *ts = &td->ts;
2460 unsigned long rate;
2461
2462 if (spent)
2463 rate = (unsigned long) (bytes * 1000000ULL / spent);
2464 else
2465 rate = 0;
2466
2467 td_io_u_lock(td);
2468
2469 add_stat_sample(&ts->bw_stat[io_u->ddir], rate);
2470
2471 if (td->bw_log)
2472 add_log_sample(td, td->bw_log, sample_val(rate), io_u->ddir,
2473 bytes, io_u->offset);
2474
2475 td->stat_io_bytes[io_u->ddir] = td->this_io_bytes[io_u->ddir];
2476 td_io_u_unlock(td);
2477}
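/*
 * The rate above is a per-completion bandwidth figure; given the
 * 1000000 scaling, 'spent' is presumably the completion time in usec,
 * so e.g. a 64KiB transfer taking 500 usec would log
 * 65536 * 1000000 / 500 = 131072000 bytes/sec (~125 MiB/s).
 */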
2478
2479static int __add_samples(struct thread_data *td, struct timespec *parent_tv,
2480 struct timespec *t, unsigned int avg_time,
2481 uint64_t *this_io_bytes, uint64_t *stat_io_bytes,
2482 struct io_stat *stat, struct io_log *log,
2483 bool is_kb)
2484{
2485 unsigned long spent, rate;
2486 enum fio_ddir ddir;
2487 unsigned int next, next_log;
2488
2489 next_log = avg_time;
2490
2491 spent = mtime_since(parent_tv, t);
2492 if (spent < avg_time && avg_time - spent >= LOG_MSEC_SLACK)
2493 return avg_time - spent;
2494
2495 td_io_u_lock(td);
2496
2497 /*
2498 * Compute read, write and trim rates for the interval.
2499 */
2500 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
2501 uint64_t delta;
2502
2503 delta = this_io_bytes[ddir] - stat_io_bytes[ddir];
2504 if (!delta)
2505 continue; /* No entries for interval */
2506
2507 if (spent) {
2508 if (is_kb)
2509 rate = delta * 1000 / spent / 1024; /* KiB/s */
2510 else
2511 rate = (delta * 1000) / spent;
2512 } else
2513 rate = 0;
2514
2515 add_stat_sample(&stat[ddir], rate);
2516
2517 if (log) {
2518 unsigned int bs = 0;
2519
2520 if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
2521 bs = td->o.min_bs[ddir];
2522
2523 next = add_log_sample(td, log, sample_val(rate), ddir, bs, 0);
2524 next_log = min(next_log, next);
2525 }
2526
2527 stat_io_bytes[ddir] = this_io_bytes[ddir];
2528 }
2529
2530 timespec_add_msec(parent_tv, avg_time);
2531
2532 td_io_u_unlock(td);
2533
2534 if (spent <= avg_time)
2535 next = avg_time;
2536 else
2537 next = avg_time - (1 + spent - avg_time);
2538
2539 return min(next, next_log);
2540}
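/*
 * Interval example for the is_kb case above: if a direction moved 10 MiB
 * (delta = 10485760 bytes) over a spent of 500 msec, the sample value is
 * 10485760 * 1000 / 500 / 1024 = 20480 KiB/s. The non-kb caller passes
 * block counts instead of bytes, so the same delta * 1000 / spent
 * formula yields IOPS.
 */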
2541
2542static int add_bw_samples(struct thread_data *td, struct timespec *t)
2543{
2544 return __add_samples(td, &td->bw_sample_time, t, td->o.bw_avg_time,
2545 td->this_io_bytes, td->stat_io_bytes,
2546 td->ts.bw_stat, td->bw_log, true);
2547}
2548
2549void add_iops_sample(struct thread_data *td, struct io_u *io_u,
2550 unsigned int bytes)
2551{
2552 struct thread_stat *ts = &td->ts;
2553
2554 td_io_u_lock(td);
2555
2556 add_stat_sample(&ts->iops_stat[io_u->ddir], 1);
2557
2558 if (td->iops_log)
2559 add_log_sample(td, td->iops_log, sample_val(1), io_u->ddir,
2560 bytes, io_u->offset);
2561
2562 td->stat_io_blocks[io_u->ddir] = td->this_io_blocks[io_u->ddir];
2563 td_io_u_unlock(td);
2564}
2565
2566static int add_iops_samples(struct thread_data *td, struct timespec *t)
2567{
2568 return __add_samples(td, &td->iops_sample_time, t, td->o.iops_avg_time,
2569 td->this_io_blocks, td->stat_io_blocks,
2570 td->ts.iops_stat, td->iops_log, false);
2571}
2572
2573/*
2574 * Returns msecs to next event
2575 */
2576int calc_log_samples(void)
2577{
2578 struct thread_data *td;
2579 unsigned int next = ~0U, tmp;
2580 struct timespec now;
2581 int i;
2582
2583 fio_gettime(&now, NULL);
2584
2585 for_each_td(td, i) {
2586 if (!td->o.stats)
2587 continue;
2588 if (in_ramp_time(td) ||
2589 !(td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING)) {
2590 next = min(td->o.iops_avg_time, td->o.bw_avg_time);
2591 continue;
2592 }
2593 if (!td->bw_log ||
2594 (td->bw_log && !per_unit_log(td->bw_log))) {
2595 tmp = add_bw_samples(td, &now);
2596 if (tmp < next)
2597 next = tmp;
2598 }
2599 if (!td->iops_log ||
2600 (td->iops_log && !per_unit_log(td->iops_log))) {
2601 tmp = add_iops_samples(td, &now);
2602 if (tmp < next)
2603 next = tmp;
2604 }
2605 }
2606
2607 return next == ~0U ? 0 : next;
2608}
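/*
 * The value returned above is how many msec the caller (typically the
 * helper thread) can sleep before some job next needs a bw/iops sample;
 * 0 means no job currently wants periodic sampling from here.
 */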
2609
2610void stat_init(void)
2611{
2612 stat_mutex = fio_mutex_init(FIO_MUTEX_UNLOCKED);
2613}
2614
2615void stat_exit(void)
2616{
2617 /*
2618 * When we have the mutex, we know out-of-band access to it
2619 * has ended.
2620 */
2621 fio_mutex_down(stat_mutex);
2622 fio_mutex_remove(stat_mutex);
2623}
2624
2625/*
2626 * Called from signal handler. Wake up status thread.
2627 */
2628void show_running_run_stats(void)
2629{
2630 helper_do_stat();
2631}
2632
2633uint32_t *io_u_block_info(struct thread_data *td, struct io_u *io_u)
2634{
2635 /* Ignore io_u's which span multiple blocks--they will just get
2636 * inaccurate counts. */
2637 int idx = (io_u->offset - io_u->file->file_offset)
2638 / td->o.bs[DDIR_TRIM];
2639 uint32_t *info = &td->ts.block_infos[idx];
2640 assert(idx < td->ts.nr_block_infos);
2641 return info;
2642}
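/*
 * Index example for the lookup above: with a file_offset of 0, a trim
 * block size (o.bs[DDIR_TRIM]) of 4096 and an io_u at offset 1 MiB, idx
 * is 1048576 / 4096 = 256, i.e. block_infos[] holds one slot per
 * trim-sized block of the file.
 */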