Add group reporting
[fio.git] / stat.c
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <libgen.h>
#include <math.h>
#include <assert.h>

#include "fio.h"

static struct itimerval itimer;
static struct list_head disk_list = LIST_HEAD_INIT(disk_list);
static dev_t last_dev;

/*
 * Cheesy number->string conversion, complete with carry rounding error.
 */
static char *num2str(unsigned long num, int maxlen, int base)
{
        /*
         * could be passed in for 10^3 base, but every caller expects
         * 2^10 base right now.
         */
        const unsigned int thousand = 1024;
        char postfix[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
        char *buf;
        int i;

        buf = malloc(128);

        for (i = 0; base > 1; i++)
                base /= thousand;

        do {
                int len, carry = 0;

                len = sprintf(buf, "%'lu", num);
                if (len <= maxlen) {
                        buf[len] = postfix[i];
                        buf[len + 1] = '\0';
                        return buf;
                }

                if ((num % thousand) >= (thousand / 2))
                        carry = 1;

                num /= thousand;
                num += carry;
                i++;
        } while (i <= 5);

        return buf;
}
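
/*
 * Worked example (editor's sketch, values traced by hand assuming the C
 * locale, where %'lu prints no digit grouping): num2str(1536, 6, 1) fits
 * within maxlen and yields "1536K", while num2str(2048000, 6, 1) is too
 * wide, divides once by 1024 with carry rounding, and yields "2000M".
 */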
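
/*
 * Reads the per-device stat file (du->path points at
 * /sys/block/<dev>/stat). The 11 fields, per the kernel's
 * Documentation/iostats.txt, are: read I/Os, read merges, sectors read,
 * read ticks (ms), write I/Os, write merges, sectors written, write
 * ticks (ms), I/Os currently in flight, io_ticks (ms the device was
 * busy) and time_in_queue (ms, weighted by outstanding I/Os).
 */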
static int get_io_ticks(struct disk_util *du, struct disk_util_stat *dus)
{
        unsigned in_flight;
        char line[256];
        FILE *f;
        char *p;

        f = fopen(du->path, "r");
        if (!f)
                return 1;

        p = fgets(line, sizeof(line), f);
        if (!p) {
                fclose(f);
                return 1;
        }

        if (sscanf(p, "%u %u %llu %u %u %u %llu %u %u %u %u\n",
                   &dus->ios[0], &dus->merges[0], &dus->sectors[0],
                   &dus->ticks[0], &dus->ios[1], &dus->merges[1],
                   &dus->sectors[1], &dus->ticks[1], &in_flight,
                   &dus->io_ticks, &dus->time_in_queue) != 11) {
                fclose(f);
                return 1;
        }

        fclose(f);
        return 0;
}

static void update_io_tick_disk(struct disk_util *du)
{
        struct disk_util_stat __dus, *dus, *ldus;
        struct timeval t;

        if (get_io_ticks(du, &__dus))
                return;

        dus = &du->dus;
        ldus = &du->last_dus;

        dus->sectors[0] += (__dus.sectors[0] - ldus->sectors[0]);
        dus->sectors[1] += (__dus.sectors[1] - ldus->sectors[1]);
        dus->ios[0] += (__dus.ios[0] - ldus->ios[0]);
        dus->ios[1] += (__dus.ios[1] - ldus->ios[1]);
        dus->merges[0] += (__dus.merges[0] - ldus->merges[0]);
        dus->merges[1] += (__dus.merges[1] - ldus->merges[1]);
        dus->ticks[0] += (__dus.ticks[0] - ldus->ticks[0]);
        dus->ticks[1] += (__dus.ticks[1] - ldus->ticks[1]);
        dus->io_ticks += (__dus.io_ticks - ldus->io_ticks);
        dus->time_in_queue += (__dus.time_in_queue - ldus->time_in_queue);

        fio_gettime(&t, NULL);
        du->msec += mtime_since(&du->time, &t);
        memcpy(&du->time, &t, sizeof(t));
        memcpy(ldus, &__dus, sizeof(__dus));
}

void update_io_ticks(void)
{
        struct list_head *entry;
        struct disk_util *du;

        list_for_each(entry, &disk_list) {
                du = list_entry(entry, struct disk_util, list);
                update_io_tick_disk(du);
        }
}

static int disk_util_exists(dev_t dev)
{
        struct list_head *entry;
        struct disk_util *du;

        list_for_each(entry, &disk_list) {
                du = list_entry(entry, struct disk_util, list);

                if (du->dev == dev)
                        return 1;
        }

        return 0;
}

static void disk_util_add(dev_t dev, char *path)
{
        struct disk_util *du, *__du;
        struct list_head *entry;

        du = malloc(sizeof(*du));
        memset(du, 0, sizeof(*du));
        INIT_LIST_HEAD(&du->list);
        sprintf(du->path, "%s/stat", path);
        du->name = strdup(basename(path));
        du->dev = dev;

        list_for_each(entry, &disk_list) {
                __du = list_entry(entry, struct disk_util, list);

                if (!strcmp(du->name, __du->name)) {
                        free(du->name);
                        free(du);
                        return;
                }
        }

        fio_gettime(&du->time, NULL);
        get_io_ticks(du, &du->last_dus);

        list_add_tail(&du->list, &disk_list);
}

static int check_dev_match(dev_t dev, char *path)
{
        unsigned int major, minor;
        char line[256], *p;
        FILE *f;

        f = fopen(path, "r");
        if (!f) {
                perror("open path");
                return 1;
        }

        p = fgets(line, sizeof(line), f);
        if (!p) {
                fclose(f);
                return 1;
        }

        if (sscanf(p, "%u:%u", &major, &minor) != 2) {
                fclose(f);
                return 1;
        }

        if (((major << 8) | minor) == dev) {
                fclose(f);
                return 0;
        }

        fclose(f);
        return 1;
}
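
/*
 * Editor's note on the comparison above: (major << 8) | minor matches
 * the classic dev_t layout with 8-bit minor numbers. A version that
 * also handled large minors would compare against makedev(major, minor)
 * from <sys/sysmacros.h> instead of shifting by hand.
 */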

static int find_block_dir(dev_t dev, char *path)
{
        struct dirent *dir;
        struct stat st;
        int found = 0;
        DIR *D;

        D = opendir(path);
        if (!D)
                return 0;

        while ((dir = readdir(D)) != NULL) {
                char full_path[256];

                if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
                        continue;

                sprintf(full_path, "%s/%s", path, dir->d_name);

                if (!strcmp(dir->d_name, "dev")) {
                        if (!check_dev_match(dev, full_path)) {
                                found = 1;
                                break;
                        }
                }

                if (lstat(full_path, &st) == -1) {
                        perror("stat");
                        break;
                }

                if (!S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode))
                        continue;

                found = find_block_dir(dev, full_path);
                if (found) {
                        strcpy(path, full_path);
                        break;
                }
        }

        closedir(D);
        return found;
}
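
/*
 * Example walk (hypothetical device numbers): for dev 8:1, the
 * recursion starting at "/sys/block" descends into sda/, finds
 * sda1/dev containing "8:1", and rewrites path in place to
 * "/sys/block/sda/sda1" as the calls unwind, so the caller ends up
 * pointing at the directory holding the matching stat file.
 */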

void init_disk_util(struct thread_data *td)
{
        struct fio_file *f;
        struct stat st;
        char foo[PATH_MAX], tmp[PATH_MAX];
        dev_t dev;
        char *p;

        if (!td->do_disk_util || (td->io_ops->flags & FIO_DISKLESSIO))
                return;

        /*
         * Just use the first file, they are all on the same device.
         */
        f = &td->files[0];
        if (!stat(f->file_name, &st)) {
                if (S_ISBLK(st.st_mode))
                        dev = st.st_rdev;
                else
                        dev = st.st_dev;
        } else {
                /*
                 * must be a file, stat the directory it lives in
                 */
                strncpy(foo, f->file_name, PATH_MAX - 1);
                p = dirname(foo);
                if (stat(p, &st)) {
                        perror("disk util stat");
                        return;
                }

                dev = st.st_dev;
        }

        if (disk_util_exists(dev))
                return;

        /*
         * For a filesystem without a device, we would repeatedly stat
         * through sysfs, which can take oodles of time for thousands of
         * files. So cache the last lookup and compare with that before
         * going through everything again.
         */
        if (dev == last_dev)
                return;

        last_dev = dev;

        sprintf(foo, "/sys/block");
        if (!find_block_dir(dev, foo))
                return;

        /*
         * If there's a ../queue/ directory there, we are inside a
         * partition. Check if that is the case and jump back. For
         * loop/md/dm etc we are already in the right spot.
         */
        sprintf(tmp, "%s/../queue", foo);
        if (!stat(tmp, &st)) {
                p = dirname(foo);
                sprintf(tmp, "%s/queue", p);
                if (stat(tmp, &st)) {
                        log_err("unknown sysfs layout\n");
                        return;
                }
                strncpy(tmp, p, PATH_MAX - 1);
                sprintf(foo, "%s", tmp);
        }

        if (td->ioscheduler)
                td->sysfs_root = strdup(foo);

        disk_util_add(dev, foo);
}

void disk_util_timer_arm(void)
{
        itimer.it_value.tv_sec = 0;
        itimer.it_value.tv_usec = DISK_UTIL_MSEC * 1000;
        setitimer(ITIMER_REAL, &itimer, NULL);
}
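
/*
 * Note: itimer.it_interval is left zeroed, so this arms a one-shot
 * SIGALRM firing DISK_UTIL_MSEC milliseconds out; the signal handler
 * is expected to call disk_util_timer_arm() again to keep sampling.
 */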

void update_rusage_stat(struct thread_data *td)
{
        struct thread_stat *ts = &td->ts;

        getrusage(RUSAGE_SELF, &ts->ru_end);

        ts->usr_time += mtime_since(&ts->ru_start.ru_utime,
                                    &ts->ru_end.ru_utime);
        ts->sys_time += mtime_since(&ts->ru_start.ru_stime,
                                    &ts->ru_end.ru_stime);
        ts->ctx += ts->ru_end.ru_nvcsw + ts->ru_end.ru_nivcsw
                        - (ts->ru_start.ru_nvcsw + ts->ru_start.ru_nivcsw);

        memcpy(&ts->ru_start, &ts->ru_end, sizeof(ts->ru_end));
}

static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
                    double *mean, double *dev)
{
        double n;

        if (is->samples == 0)
                return 0;

        *min = is->min_val;
        *max = is->max_val;

        n = (double) is->samples;
        *mean = is->mean;

        if (n > 1.0)
                *dev = sqrt(is->S / (n - 1.0));
        else
                *dev = -1.0;

        return 1;
}
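
/*
 * Background note: is->S is the running sum of squared deviations from
 * the mean (see add_stat_sample() below), so sqrt(S / (n - 1)) is the
 * sample standard deviation; with a single sample it is undefined and
 * reported as -1.
 */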

static void show_group_stats(struct group_run_stats *rs, int id)
{
        char *p1, *p2, *p3, *p4;
        const char *ddir_str[] = { "  READ", " WRITE" };
        int i;

        fprintf(f_out, "\nRun status group %d (all jobs):\n", id);

        for (i = 0; i <= DDIR_WRITE; i++) {
                if (!rs->max_run[i])
                        continue;

                p1 = num2str(rs->io_kb[i], 6, 1);
                p2 = num2str(rs->agg[i], 6, 1);
                p3 = num2str(rs->min_bw[i], 6, 1);
                p4 = num2str(rs->max_bw[i], 6, 1);

                fprintf(f_out, "%s: io=%siB, aggrb=%siB/s, minb=%siB/s,"
                        " maxb=%siB/s, mint=%llumsec, maxt=%llumsec\n",
                        ddir_str[i], p1, p2, p3, p4, rs->min_run[i],
                        rs->max_run[i]);

                free(p1);
                free(p2);
                free(p3);
                free(p4);
        }
}

static void show_disk_util(void)
{
        struct disk_util_stat *dus;
        struct list_head *entry, *next;
        struct disk_util *du;
        double util;

        fprintf(f_out, "\nDisk stats (read/write):\n");

        list_for_each(entry, &disk_list) {
                du = list_entry(entry, struct disk_util, list);
                dus = &du->dus;

                util = (double) 100 * du->dus.io_ticks / (double) du->msec;
                if (util > 100.0)
                        util = 100.0;

                fprintf(f_out, "  %s: ios=%u/%u, merge=%u/%u, ticks=%u/%u,"
                        " in_queue=%u, util=%3.2f%%\n", du->name,
                        dus->ios[0], dus->ios[1], dus->merges[0],
                        dus->merges[1], dus->ticks[0], dus->ticks[1],
                        dus->time_in_queue, util);
        }

        /*
         * now free the list
         */
        list_for_each_safe(entry, next, &disk_list) {
                list_del(entry);
                du = list_entry(entry, struct disk_util, list);
                free(du->name);
                free(du);
        }
}

static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
                             int ddir)
{
        const char *ddir_str[] = { "read ", "write" };
        unsigned long min, max;
        unsigned long long bw;
        double mean, dev;
        char *io_p, *bw_p;

        if (!ts->runtime[ddir])
                return;

        bw = ts->io_bytes[ddir] / ts->runtime[ddir];
        io_p = num2str(ts->io_bytes[ddir] >> 10, 6, 1);
        bw_p = num2str(bw, 6, 1);

        fprintf(f_out, "  %s: io=%siB, bw=%siB/s, runt=%6lumsec\n",
                ddir_str[ddir], io_p, bw_p, ts->runtime[ddir]);

        free(io_p);
        free(bw_p);

        if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
                fprintf(f_out, "    slat (msec): min=%5lu, max=%5lu,"
                        " avg=%5.02f, stdev=%5.02f\n", min, max, mean, dev);

        if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
                fprintf(f_out, "    clat (msec): min=%5lu, max=%5lu,"
                        " avg=%5.02f, stdev=%5.02f\n", min, max, mean, dev);

        if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
                double p_of_agg;

                p_of_agg = mean * 100 / (double) rs->agg[ddir];
                fprintf(f_out, "    bw (KiB/s) : min=%5lu, max=%5lu,"
                        " per=%3.2f%%, avg=%5.02f, stdev=%5.02f\n",
                        min, max, p_of_agg, mean, dev);
        }
}

static void show_thread_status(struct thread_stat *ts,
                               struct group_run_stats *rs)
{
        double usr_cpu, sys_cpu;
        unsigned long runtime;
        double io_u_dist[FIO_IO_U_MAP_NR];
        double io_u_lat[FIO_IO_U_LAT_NR];
        int i;

        if (!(ts->io_bytes[0] + ts->io_bytes[1]))
                return;

        if (!ts->error)
                fprintf(f_out, "%s: (groupid=%d): err=%2d: pid=%d\n",
                        ts->name, ts->groupid, ts->error, ts->pid);
        else
                fprintf(f_out, "%s: (groupid=%d): err=%2d (%s): pid=%d\n",
                        ts->name, ts->groupid, ts->error, ts->verror,
                        ts->pid);

        if (ts->io_bytes[DDIR_READ])
                show_ddir_status(rs, ts, DDIR_READ);
        if (ts->io_bytes[DDIR_WRITE])
                show_ddir_status(rs, ts, DDIR_WRITE);

        runtime = ts->total_run_time;
        if (runtime) {
                double runt = (double) runtime;

                usr_cpu = (double) ts->usr_time * 100 / runt;
                sys_cpu = (double) ts->sys_time * 100 / runt;
        } else {
                usr_cpu = 0;
                sys_cpu = 0;
        }

        fprintf(f_out, "  cpu          : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n",
                usr_cpu, sys_cpu, ts->ctx);

        /*
         * Do depth distribution calculations
         */
        for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
                io_u_dist[i] = (double) ts->io_u_map[i] /
                                (double) ts->total_io_u;
                io_u_dist[i] *= 100.0;
        }

        fprintf(f_out, "  IO depths    : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%,"
                " 8=%3.1f%%, 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n",
                io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
                io_u_dist[4], io_u_dist[5], io_u_dist[6]);

        /*
         * Do latency distribution calculations
         */
        for (i = 0; i < FIO_IO_U_LAT_NR; i++) {
                io_u_lat[i] = (double) ts->io_u_lat[i] /
                                (double) ts->total_io_u;
                io_u_lat[i] *= 100.0;
        }

        fprintf(f_out, "     lat (msec): 2=%3.1f%%, 4=%3.1f%%, 10=%3.1f%%,"
                " 20=%3.1f%%, 50=%3.1f%%, 100=%3.1f%%\n", io_u_lat[0],
                io_u_lat[1], io_u_lat[2], io_u_lat[3], io_u_lat[4],
                io_u_lat[5]);
        fprintf(f_out, "     lat (msec): 250=%3.1f%%, 500=%3.1f%%,"
                " 750=%3.1f%%, 1000=%3.1f%%, >=2000=%3.1f%%\n", io_u_lat[6],
                io_u_lat[7], io_u_lat[8], io_u_lat[9], io_u_lat[10]);

        if (ts->description)
                fprintf(f_out, "%s\n", ts->description);
}

static void show_ddir_status_terse(struct thread_stat *ts,
                                   struct group_run_stats *rs, int ddir)
{
        unsigned long min, max;
        unsigned long long bw;
        double mean, dev;

        bw = 0;
        if (ts->runtime[ddir])
                bw = ts->io_bytes[ddir] / ts->runtime[ddir];

        fprintf(f_out, ",%llu,%llu,%lu", ts->io_bytes[ddir] >> 10, bw,
                ts->runtime[ddir]);

        if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
                fprintf(f_out, ",%lu,%lu,%f,%f", min, max, mean, dev);
        else
                fprintf(f_out, ",%lu,%lu,%f,%f", 0UL, 0UL, 0.0, 0.0);

        if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
                fprintf(f_out, ",%lu,%lu,%f,%f", min, max, mean, dev);
        else
                fprintf(f_out, ",%lu,%lu,%f,%f", 0UL, 0UL, 0.0, 0.0);

        if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
                double p_of_agg;

                p_of_agg = mean * 100 / (double) rs->agg[ddir];
                fprintf(f_out, ",%lu,%lu,%f%%,%f,%f", min, max, p_of_agg,
                        mean, dev);
        } else
                fprintf(f_out, ",%lu,%lu,%f%%,%f,%f", 0UL, 0UL, 0.0, 0.0, 0.0);
}

static void show_thread_status_terse(struct thread_stat *ts,
                                     struct group_run_stats *rs)
{
        double usr_cpu, sys_cpu;

        fprintf(f_out, "%s,%d,%d", ts->name, ts->groupid, ts->error);

        show_ddir_status_terse(ts, rs, 0);
        show_ddir_status_terse(ts, rs, 1);

        if (ts->total_run_time) {
                double runt = (double) ts->total_run_time;

                usr_cpu = (double) ts->usr_time * 100 / runt;
                sys_cpu = (double) ts->sys_time * 100 / runt;
        } else {
                usr_cpu = 0;
                sys_cpu = 0;
        }

        fprintf(f_out, ",%f%%,%f%%,%lu\n", usr_cpu, sys_cpu, ts->ctx);
}

static void __sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
{
        double mean, S;

        dst->min_val = min(dst->min_val, src->min_val);
        dst->max_val = max(dst->max_val, src->max_val);
        dst->samples += src->samples;

        /*
         * Needs a new method for calculating stddev; we cannot just
         * average them as is done below for nr > 1.
         */
        if (nr == 1) {
                mean = src->mean;
                S = src->S;
        } else {
                mean = ((src->mean * (double) nr) +
                         dst->mean) / ((double) nr + 1.0);
                S = ((src->S * (double) nr) + dst->S) / ((double) nr + 1.0);
        }

        dst->mean = mean;
        dst->S = S;
}
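
/*
 * Editor's sketch of an exact alternative (not what the code above
 * does): two Welford accumulators with counts n1/n2, means m1/m2 and
 * squared-deviation sums S1/S2 combine without averaging error as
 *
 *      delta = m2 - m1;
 *      mean  = (n1 * m1 + n2 * m2) / (n1 + n2);
 *      S     = S1 + S2 + delta * delta * n1 * n2 / (n1 + n2);
 *
 * (the parallel form of Welford/Chan et al.), which would address the
 * TODO in the comment above.
 */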

static void sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
{
        __sum_stat(&dst[DDIR_READ], &src[DDIR_READ], nr);
        __sum_stat(&dst[DDIR_WRITE], &src[DDIR_WRITE], nr);
}

void show_run_stats(void)
{
        struct group_run_stats *runstats, *rs;
        struct thread_data *td;
        struct thread_stat *threadstats, *ts;
        int i, j, k, nr_ts, last_ts, members;

        runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));

        for (i = 0; i < groupid + 1; i++) {
                rs = &runstats[i];

                memset(rs, 0, sizeof(*rs));
                rs->min_bw[0] = rs->min_run[0] = ~0UL;
                rs->min_bw[1] = rs->min_run[1] = ~0UL;
        }

        /*
         * find out how many thread stats we need. if group reporting isn't
         * enabled, it's one-per-td.
         */
        nr_ts = 0;
        last_ts = -1;
        for_each_td(td, i) {
                if (!td->group_reporting) {
                        nr_ts++;
                        continue;
                }
                if (last_ts == td->groupid)
                        continue;

                last_ts = td->groupid;
                nr_ts++;
        }

        threadstats = malloc(nr_ts * sizeof(struct thread_stat));

        for (i = 0; i < nr_ts; i++) {
                ts = &threadstats[i];

                memset(ts, 0, sizeof(*ts));
                ts->clat_stat[0].min_val = -1UL;
                ts->clat_stat[1].min_val = -1UL;
                ts->slat_stat[0].min_val = -1UL;
                ts->slat_stat[1].min_val = -1UL;
                ts->bw_stat[0].min_val = -1UL;
                ts->bw_stat[1].min_val = -1UL;
        }

        j = 0;
        last_ts = -1;
        members = 0;
        for_each_td(td, i) {
                ts = &threadstats[j];

                members++;

                if (!ts->groupid) {
                        ts->name = td->name;
                        ts->description = td->description;
                        ts->error = td->error;
                        ts->groupid = td->groupid;
                        ts->pid = td->pid;
                        ts->verror = td->verror;
                }

                sum_stat(ts->clat_stat, td->ts.clat_stat, members);
                sum_stat(ts->slat_stat, td->ts.slat_stat, members);
                sum_stat(ts->bw_stat, td->ts.bw_stat, members);

                ts->stat_io_bytes[0] += td->ts.stat_io_bytes[0];
                ts->stat_io_bytes[1] += td->ts.stat_io_bytes[1];

                ts->usr_time += td->ts.usr_time;
                ts->sys_time += td->ts.sys_time;
                ts->ctx += td->ts.ctx;

                for (k = 0; k < FIO_IO_U_MAP_NR; k++)
                        ts->io_u_map[k] += td->ts.io_u_map[k];
                for (k = 0; k < FIO_IO_U_LAT_NR; k++)
                        ts->io_u_lat[k] += td->ts.io_u_lat[k];

                ts->total_io_u += td->ts.total_io_u;
                ts->io_bytes[0] += td->ts.io_bytes[0];
                ts->io_bytes[1] += td->ts.io_bytes[1];

                if (ts->runtime[0] < td->ts.runtime[0])
                        ts->runtime[0] = td->ts.runtime[0];
                if (ts->runtime[1] < td->ts.runtime[1])
                        ts->runtime[1] = td->ts.runtime[1];

                ts->total_run_time += td->ts.total_run_time;

                if (!td->group_reporting) {
                        members = 0;
                        j++;
                        continue;
                }
                if (last_ts == td->groupid)
                        continue;

                if (last_ts != -1) {
                        members = 0;
                        j++;
                }

                last_ts = td->groupid;
        }

        for (i = 0; i < nr_ts; i++) {
                unsigned long long rbw, wbw;

                ts = &threadstats[i];
                rs = &runstats[ts->groupid];

                if (ts->runtime[0] < rs->min_run[0] || !rs->min_run[0])
                        rs->min_run[0] = ts->runtime[0];
                if (ts->runtime[0] > rs->max_run[0])
                        rs->max_run[0] = ts->runtime[0];
                if (ts->runtime[1] < rs->min_run[1] || !rs->min_run[1])
                        rs->min_run[1] = ts->runtime[1];
                if (ts->runtime[1] > rs->max_run[1])
                        rs->max_run[1] = ts->runtime[1];

                rbw = wbw = 0;
                if (ts->runtime[0])
                        rbw = ts->io_bytes[0] /
                                (unsigned long long) ts->runtime[0];
                if (ts->runtime[1])
                        wbw = ts->io_bytes[1] /
                                (unsigned long long) ts->runtime[1];

                if (rbw < rs->min_bw[0])
                        rs->min_bw[0] = rbw;
                if (wbw < rs->min_bw[1])
                        rs->min_bw[1] = wbw;
                if (rbw > rs->max_bw[0])
                        rs->max_bw[0] = rbw;
                if (wbw > rs->max_bw[1])
                        rs->max_bw[1] = wbw;

                rs->io_kb[0] += ts->io_bytes[0] >> 10;
                rs->io_kb[1] += ts->io_bytes[1] >> 10;
        }

        for (i = 0; i < groupid + 1; i++) {
                rs = &runstats[i];

                if (rs->max_run[0])
                        rs->agg[0] = (rs->io_kb[0] * 1024) / rs->max_run[0];
                if (rs->max_run[1])
                        rs->agg[1] = (rs->io_kb[1] * 1024) / rs->max_run[1];
        }

        /*
         * don't overwrite last signal output
         */
        if (!terse_output)
                printf("\n");

        for (i = 0; i < nr_ts; i++) {
                ts = &threadstats[i];
                rs = &runstats[ts->groupid];

                if (terse_output)
                        show_thread_status_terse(ts, rs);
                else
                        show_thread_status(ts, rs);
        }

        if (!terse_output) {
                for (i = 0; i < groupid + 1; i++)
                        show_group_stats(&runstats[i], i);

                show_disk_util();
        }

        free(runstats);
        free(threadstats);
}
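
/*
 * Welford's streaming update: the running mean and the sum of squared
 * deviations (S) are maintained incrementally per sample, so mean and
 * stddev can be reported later (see calc_lat()) without storing every
 * data point.
 */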
static inline void add_stat_sample(struct io_stat *is, unsigned long data)
{
        double val = data;
        double delta, n;

        if (data > is->max_val)
                is->max_val = data;
        if (data < is->min_val)
                is->min_val = data;

        delta = val - is->mean;
        n = is->samples + 1.0;
        is->mean += delta / n;
        is->S += delta * (val - is->mean);

        is->samples++;
}

static void __add_log_sample(struct io_log *iolog, unsigned long val,
                             enum fio_ddir ddir, unsigned long time)
{
        if (iolog->nr_samples == iolog->max_samples) {
                int new_size = sizeof(struct io_sample) *
                                iolog->max_samples * 2;

                iolog->log = realloc(iolog->log, new_size);
                iolog->max_samples <<= 1;
        }

        iolog->log[iolog->nr_samples].val = val;
        iolog->log[iolog->nr_samples].time = time;
        iolog->log[iolog->nr_samples].ddir = ddir;
        iolog->nr_samples++;
}

static void add_log_sample(struct thread_data *td, struct io_log *iolog,
                           unsigned long val, enum fio_ddir ddir)
{
        __add_log_sample(iolog, val, ddir, mtime_since_now(&td->epoch));
}

void add_agg_sample(unsigned long val, enum fio_ddir ddir)
{
        struct io_log *iolog = agg_io_log[ddir];

        __add_log_sample(iolog, val, ddir, mtime_since_genesis());
}

void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
                     unsigned long msec)
{
        struct thread_stat *ts = &td->ts;

        add_stat_sample(&ts->clat_stat[ddir], msec);

        if (ts->clat_log)
                add_log_sample(td, ts->clat_log, msec, ddir);
}

void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
                     unsigned long msec)
{
        struct thread_stat *ts = &td->ts;

        add_stat_sample(&ts->slat_stat[ddir], msec);

        if (ts->slat_log)
                add_log_sample(td, ts->slat_log, msec, ddir);
}
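
/*
 * Bandwidth sampling note: spent is measured in milliseconds, so the
 * computed rate is bytes-per-millisecond, numerically ~KB/s. Samples
 * arriving closer together than bw_avg_time are skipped to keep the
 * bandwidth log at the configured granularity.
 */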
void add_bw_sample(struct thread_data *td, enum fio_ddir ddir,
                   struct timeval *t)
{
        struct thread_stat *ts = &td->ts;
        unsigned long spent = mtime_since(&ts->stat_sample_time[ddir], t);
        unsigned long rate;

        if (spent < td->bw_avg_time)
                return;

        rate = (td->this_io_bytes[ddir] - ts->stat_io_bytes[ddir]) / spent;
        add_stat_sample(&ts->bw_stat[ddir], rate);

        if (ts->bw_log)
                add_log_sample(td, ts->bw_log, rate, ddir);

        fio_gettime(&ts->stat_sample_time[ddir], NULL);
        ts->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
}