Improve job grouping
[fio.git] / stat.c
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <libgen.h>
#include <math.h>

#include "fio.h"

static struct itimerval itimer;
static struct list_head disk_list = LIST_HEAD_INIT(disk_list);
static dev_t last_dev;

/*
 * Cheesy number->string conversion, complete with carry rounding error.
 */
static char *num2str(unsigned long num, int maxlen, int base, int pow2)
{
	char postfix[] = { ' ', 'K', 'M', 'G', 'P', 'E' };
	unsigned int thousand;
	char *buf;
	int i;

	if (pow2)
		thousand = 1024;
	else
		thousand = 1000;

	buf = malloc(128);

	for (i = 0; base > 1; i++)
		base /= thousand;

	do {
		int len, carry = 0;

		len = sprintf(buf, "%'lu", num);
		if (len <= maxlen) {
			if (i >= 1) {
				buf[len] = postfix[i];
				buf[len + 1] = '\0';
			}
			return buf;
		}

		if ((num % thousand) >= (thousand / 2))
			carry = 1;

		num /= thousand;
		num += carry;
		i++;
	} while (i <= 5);

	return buf;
}

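/*
 * Read and parse the sysfs "stat" file for this disk. The single line
 * holds the standard 11 block layer counters: ios, merges, sectors and
 * ticks for reads and writes, plus in-flight, io_ticks and time_in_queue.
 */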
static int get_io_ticks(struct disk_util *du, struct disk_util_stat *dus)
{
	unsigned in_flight;
	char line[256];
	FILE *f;
	char *p;

	f = fopen(du->path, "r");
	if (!f)
		return 1;

	p = fgets(line, sizeof(line), f);
	if (!p) {
		fclose(f);
		return 1;
	}

	if (sscanf(p, "%u %u %llu %u %u %u %llu %u %u %u %u\n", &dus->ios[0], &dus->merges[0], &dus->sectors[0], &dus->ticks[0], &dus->ios[1], &dus->merges[1], &dus->sectors[1], &dus->ticks[1], &in_flight, &dus->io_ticks, &dus->time_in_queue) != 11) {
		fclose(f);
		return 1;
	}

	fclose(f);
	return 0;
}

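/*
 * Take a fresh sysfs sample and add the delta against the previous
 * sample to the accumulated totals, then remember the new sample and
 * the elapsed wall clock time.
 */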
static void update_io_tick_disk(struct disk_util *du)
{
	struct disk_util_stat __dus, *dus, *ldus;
	struct timeval t;

	if (get_io_ticks(du, &__dus))
		return;

	dus = &du->dus;
	ldus = &du->last_dus;

	dus->sectors[0] += (__dus.sectors[0] - ldus->sectors[0]);
	dus->sectors[1] += (__dus.sectors[1] - ldus->sectors[1]);
	dus->ios[0] += (__dus.ios[0] - ldus->ios[0]);
	dus->ios[1] += (__dus.ios[1] - ldus->ios[1]);
	dus->merges[0] += (__dus.merges[0] - ldus->merges[0]);
	dus->merges[1] += (__dus.merges[1] - ldus->merges[1]);
	dus->ticks[0] += (__dus.ticks[0] - ldus->ticks[0]);
	dus->ticks[1] += (__dus.ticks[1] - ldus->ticks[1]);
	dus->io_ticks += (__dus.io_ticks - ldus->io_ticks);
	dus->time_in_queue += (__dus.time_in_queue - ldus->time_in_queue);

	fio_gettime(&t, NULL);
	du->msec += mtime_since(&du->time, &t);
	memcpy(&du->time, &t, sizeof(t));
	memcpy(ldus, &__dus, sizeof(__dus));
}

void update_io_ticks(void)
{
	struct list_head *entry;
	struct disk_util *du;

	list_for_each(entry, &disk_list) {
		du = list_entry(entry, struct disk_util, list);
		update_io_tick_disk(du);
	}
}

static int disk_util_exists(dev_t dev)
{
	struct list_head *entry;
	struct disk_util *du;

	list_for_each(entry, &disk_list) {
		du = list_entry(entry, struct disk_util, list);

		if (du->dev == dev)
			return 1;
	}

	return 0;
}

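/*
 * Allocate a new disk_util entry for this device, point it at the sysfs
 * "stat" file, and append it to disk_list unless a disk with the same
 * name is already being tracked.
 */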
static void disk_util_add(dev_t dev, char *path)
{
	struct disk_util *du, *__du;
	struct list_head *entry;

	du = malloc(sizeof(*du));
	memset(du, 0, sizeof(*du));
	INIT_LIST_HEAD(&du->list);
	sprintf(du->path, "%s/stat", path);
	du->name = strdup(basename(path));
	du->dev = dev;

	list_for_each(entry, &disk_list) {
		__du = list_entry(entry, struct disk_util, list);

		if (!strcmp(du->name, __du->name)) {
			free(du->name);
			free(du);
			return;
		}
	}

	fio_gettime(&du->time, NULL);
	get_io_ticks(du, &du->last_dus);

	list_add_tail(&du->list, &disk_list);
}

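/*
 * Read a sysfs "dev" file ("major:minor") and return 0 if it matches
 * the device number we are looking for, 1 otherwise.
 */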
static int check_dev_match(dev_t dev, char *path)
{
	unsigned int major, minor;
	char line[256], *p;
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		perror("open path");
		return 1;
	}

	p = fgets(line, sizeof(line), f);
	if (!p) {
		fclose(f);
		return 1;
	}

	if (sscanf(p, "%u:%u", &major, &minor) != 2) {
		fclose(f);
		return 1;
	}

	if (((major << 8) | minor) == dev) {
		fclose(f);
		return 0;
	}

	fclose(f);
	return 1;
}

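/*
 * Recursively walk the sysfs tree below 'path' looking for the directory
 * whose "dev" file matches the given device number. On success the match
 * is copied back into 'path' and 1 is returned.
 */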
static int find_block_dir(dev_t dev, char *path)
{
	struct dirent *dir;
	struct stat st;
	int found = 0;
	DIR *D;

	D = opendir(path);
	if (!D)
		return 0;

	while ((dir = readdir(D)) != NULL) {
		char full_path[256];

		if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
			continue;

		sprintf(full_path, "%s/%s", path, dir->d_name);

		if (!strcmp(dir->d_name, "dev")) {
			if (!check_dev_match(dev, full_path)) {
				found = 1;
				break;
			}
		}

		if (lstat(full_path, &st) == -1) {
			perror("stat");
			break;
		}

		if (!S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode))
			continue;

		found = find_block_dir(dev, full_path);
		if (found) {
			strcpy(path, full_path);
			break;
		}
	}

	closedir(D);
	return found;
}

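/*
 * Figure out which block device backs the files of this job, locate its
 * directory under /sys/block, and register it for disk utilization
 * tracking.
 */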
void init_disk_util(struct thread_data *td)
{
	struct fio_file *f;
	struct stat st;
	char foo[PATH_MAX], tmp[PATH_MAX];
	dev_t dev;
	char *p;

	if (!td->do_disk_util || (td->io_ops->flags & FIO_DISKLESSIO))
		return;

	/*
	 * Just use the same file, they are on the same device.
	 */
	f = &td->files[0];
	if (!stat(f->file_name, &st)) {
		if (S_ISBLK(st.st_mode))
			dev = st.st_rdev;
		else
			dev = st.st_dev;
	} else {
		/*
		 * must be a file, open "." in that path
		 */
		strncpy(foo, f->file_name, PATH_MAX - 1);
		p = dirname(foo);
		if (stat(p, &st)) {
			perror("disk util stat");
			return;
		}

		dev = st.st_dev;
	}

	if (disk_util_exists(dev))
		return;

	/*
	 * for an fs without a device, we will repeatedly stat through
	 * sysfs which can take oodles of time for thousands of files. so
	 * cache the last lookup and compare with that before going through
	 * everything again.
	 */
	if (dev == last_dev)
		return;

	last_dev = dev;

	sprintf(foo, "/sys/block");
	if (!find_block_dir(dev, foo))
		return;

	/*
	 * If there's a ../queue/ directory there, we are inside a partition.
	 * Check if that is the case and jump back. For loop/md/dm etc we
	 * are already in the right spot.
	 */
	sprintf(tmp, "%s/../queue", foo);
	if (!stat(tmp, &st)) {
		p = dirname(foo);
		sprintf(tmp, "%s/queue", p);
		if (stat(tmp, &st)) {
			log_err("unknown sysfs layout\n");
			return;
		}
		strncpy(tmp, p, PATH_MAX - 1);
		sprintf(foo, "%s", tmp);
	}

	if (td->ioscheduler)
		td->sysfs_root = strdup(foo);

	disk_util_add(dev, foo);
}

void disk_util_timer_arm(void)
{
	itimer.it_value.tv_sec = 0;
	itimer.it_value.tv_usec = DISK_UTIL_MSEC * 1000;
	setitimer(ITIMER_REAL, &itimer, NULL);
}

void update_rusage_stat(struct thread_data *td)
{
	struct thread_stat *ts = &td->ts;

	getrusage(RUSAGE_SELF, &ts->ru_end);

	ts->usr_time += mtime_since(&ts->ru_start.ru_utime, &ts->ru_end.ru_utime);
	ts->sys_time += mtime_since(&ts->ru_start.ru_stime, &ts->ru_end.ru_stime);
	ts->ctx += ts->ru_end.ru_nvcsw + ts->ru_end.ru_nivcsw - (ts->ru_start.ru_nvcsw + ts->ru_start.ru_nivcsw);

	memcpy(&ts->ru_start, &ts->ru_end, sizeof(ts->ru_end));
}

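/*
 * Pull min/max/mean out of an io_stat and derive the sample standard
 * deviation from the running sum of squared deviations. Returns 0 if
 * there are no samples to report.
 */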
static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
		    double *mean, double *dev)
{
	double n = is->samples;

	if (is->samples == 0)
		return 0;

	*min = is->min_val;
	*max = is->max_val;

	n = (double) is->samples;
	*mean = is->mean;

	if (n > 1.0)
		*dev = sqrt(is->S / (n - 1.0));
	else
		*dev = -1.0;

	return 1;
}

static void show_group_stats(struct group_run_stats *rs, int id)
{
	char *p1, *p2, *p3, *p4;
	const char *ddir_str[] = { " READ", " WRITE" };
	int i;

	fprintf(f_out, "\nRun status group %d (all jobs):\n", id);

	for (i = 0; i <= DDIR_WRITE; i++) {
		if (!rs->max_run[i])
			continue;

		p1 = num2str(rs->io_kb[i], 6, 1000, 1);
		p2 = num2str(rs->agg[i], 6, 1000, 1);
		p3 = num2str(rs->min_bw[i], 6, 1000, 1);
		p4 = num2str(rs->max_bw[i], 6, 1000, 1);

		fprintf(f_out, "%s: io=%siB, aggrb=%siB/s, minb=%siB/s, maxb=%siB/s, mint=%llumsec, maxt=%llumsec\n", ddir_str[i], p1, p2, p3, p4, rs->min_run[i], rs->max_run[i]);

		free(p1);
		free(p2);
		free(p3);
		free(p4);
	}
}

static void show_disk_util(void)
{
	struct disk_util_stat *dus;
	struct list_head *entry, *next;
	struct disk_util *du;
	double util;

	fprintf(f_out, "\nDisk stats (read/write):\n");

	list_for_each(entry, &disk_list) {
		du = list_entry(entry, struct disk_util, list);
		dus = &du->dus;

		util = (double) 100 * du->dus.io_ticks / (double) du->msec;
		if (util > 100.0)
			util = 100.0;

		fprintf(f_out, " %s: ios=%u/%u, merge=%u/%u, ticks=%u/%u, in_queue=%u, util=%3.2f%%\n", du->name, dus->ios[0], dus->ios[1], dus->merges[0], dus->merges[1], dus->ticks[0], dus->ticks[1], dus->time_in_queue, util);
	}

	/*
	 * now free the list
	 */
	list_for_each_safe(entry, next, &disk_list) {
		list_del(entry);
		du = list_entry(entry, struct disk_util, list);
		free(du->name);
		free(du);
	}
}

#define ts_total_io_u(ts) \
	((ts)->total_io_u[0] + (ts)->total_io_u[1])

static void stat_calc_dist(struct thread_stat *ts, double *io_u_dist)
{
	int i;

	/*
	 * Do depth distribution calculations
	 */
	for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
		io_u_dist[i] = (double) ts->io_u_map[i] / (double) ts_total_io_u(ts);
		io_u_dist[i] *= 100.0;
	}
}

static void stat_calc_lat(struct thread_stat *ts, double *io_u_lat)
{
	int i;

	/*
	 * Do latency distribution calculations
	 */
	for (i = 0; i < FIO_IO_U_LAT_NR; i++) {
		io_u_lat[i] = (double) ts->io_u_lat[i] / (double) ts_total_io_u(ts);
		io_u_lat[i] *= 100.0;
	}
}

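/*
 * Print the human readable per-direction summary for one thread stat:
 * totals, bandwidth, IOPS, runtime, and the slat/clat/bw latency lines.
 */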
static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
			     int ddir)
{
	const char *ddir_str[] = { "read ", "write" };
	unsigned long min, max;
	unsigned long long bw, iops;
	double mean, dev;
	char *io_p, *bw_p, *iops_p;

	if (!ts->runtime[ddir])
		return;

	bw = ts->io_bytes[ddir] / ts->runtime[ddir];
	iops = (1000 * ts->total_io_u[ddir]) / ts->runtime[ddir];
	io_p = num2str(ts->io_bytes[ddir] >> 10, 6, 1000, 1);
	bw_p = num2str(bw, 6, 1000, 1);
	iops_p = num2str(iops, 6, 1, 0);

	fprintf(f_out, " %s: io=%siB, bw=%siB/s, iops=%s, runt=%6lumsec\n", ddir_str[ddir], io_p, bw_p, iops_p, ts->runtime[ddir]);

	free(io_p);
	free(bw_p);
	free(iops_p);

	if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
		fprintf(f_out, " slat (msec): min=%5lu, max=%5lu, avg=%5.02f, stdev=%5.02f\n", min, max, mean, dev);

	if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
		fprintf(f_out, " clat (msec): min=%5lu, max=%5lu, avg=%5.02f, stdev=%5.02f\n", min, max, mean, dev);

	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		double p_of_agg;

		p_of_agg = mean * 100 / (double) rs->agg[ddir];
		fprintf(f_out, " bw (KiB/s) : min=%5lu, max=%5lu, per=%3.2f%%, avg=%5.02f, stdev=%5.02f\n", min, max, p_of_agg, mean, dev);
	}
}

static void show_thread_status(struct thread_stat *ts,
			       struct group_run_stats *rs)
{
	double usr_cpu, sys_cpu;
	unsigned long runtime;
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat[FIO_IO_U_LAT_NR];

	if (!(ts->io_bytes[0] + ts->io_bytes[1]))
		return;

	if (!ts->error)
		fprintf(f_out, "%s: (groupid=%d, jobs=%d): err=%2d: pid=%d\n", ts->name, ts->groupid, ts->members, ts->error, ts->pid);
	else
		fprintf(f_out, "%s: (groupid=%d, jobs=%d): err=%2d (%s): pid=%d\n", ts->name, ts->groupid, ts->members, ts->error, ts->verror, ts->pid);

	if (ts->description)
		fprintf(f_out, " Description : [%s]\n", ts->description);

	if (ts->io_bytes[DDIR_READ])
		show_ddir_status(rs, ts, DDIR_READ);
	if (ts->io_bytes[DDIR_WRITE])
		show_ddir_status(rs, ts, DDIR_WRITE);

	runtime = ts->total_run_time;
	if (runtime) {
		double runt = (double) runtime;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	fprintf(f_out, " cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, ts->ctx);

	stat_calc_dist(ts, io_u_dist);
	stat_calc_lat(ts, io_u_lat);

	fprintf(f_out, " IO depths : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4], io_u_dist[5], io_u_dist[6]);

	fprintf(f_out, " lat (msec): 2=%3.1f%%, 4=%3.1f%%, 10=%3.1f%%, 20=%3.1f%%, 50=%3.1f%%, 100=%3.1f%%\n", io_u_lat[0], io_u_lat[1], io_u_lat[2], io_u_lat[3], io_u_lat[4], io_u_lat[5]);
	fprintf(f_out, " lat (msec): 250=%3.1f%%, 500=%3.1f%%, 750=%3.1f%%, 1000=%3.1f%%, >=2000=%3.1f%%\n", io_u_lat[6], io_u_lat[7], io_u_lat[8], io_u_lat[9], io_u_lat[10]);
}

static void show_ddir_status_terse(struct thread_stat *ts,
				   struct group_run_stats *rs, int ddir)
{
	unsigned long min, max;
	unsigned long long bw;
	double mean, dev;

	bw = 0;
	if (ts->runtime[ddir])
		bw = ts->io_bytes[ddir] / ts->runtime[ddir];

	fprintf(f_out, ";%llu;%llu;%lu", ts->io_bytes[ddir] >> 10, bw, ts->runtime[ddir]);

	if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
		fprintf(f_out, ";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		fprintf(f_out, ";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
		fprintf(f_out, ";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		fprintf(f_out, ";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		double p_of_agg;

		p_of_agg = mean * 100 / (double) rs->agg[ddir];
		fprintf(f_out, ";%lu;%lu;%f%%;%f;%f", min, max, p_of_agg, mean, dev);
	} else
		fprintf(f_out, ";%lu;%lu;%f%%;%f;%f", 0UL, 0UL, 0.0, 0.0, 0.0);
}


static void show_thread_status_terse(struct thread_stat *ts,
				     struct group_run_stats *rs)
{
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat[FIO_IO_U_LAT_NR];
	double usr_cpu, sys_cpu;

	fprintf(f_out, "%s;%d;%d", ts->name, ts->groupid, ts->error);

	show_ddir_status_terse(ts, rs, 0);
	show_ddir_status_terse(ts, rs, 1);

	if (ts->total_run_time) {
		double runt = (double) ts->total_run_time;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	fprintf(f_out, ";%f%%;%f%%;%lu", usr_cpu, sys_cpu, ts->ctx);

	stat_calc_dist(ts, io_u_dist);
	stat_calc_lat(ts, io_u_lat);

	fprintf(f_out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%", io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4], io_u_dist[5], io_u_dist[6]);

	fprintf(f_out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%", io_u_lat[0], io_u_lat[1], io_u_lat[2], io_u_lat[3], io_u_lat[4], io_u_lat[5]);
	fprintf(f_out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%", io_u_lat[6], io_u_lat[7], io_u_lat[8], io_u_lat[9], io_u_lat[10]);

	if (ts->description)
		fprintf(f_out, ";%s", ts->description);

	fprintf(f_out, "\n");
}

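/*
 * Merge one job's io_stat into the group aggregate: track global min/max,
 * accumulate the sample count and fold the mean and S into a running
 * average across the nr jobs merged so far.
 */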
static void sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
{
	double mean, S;

	dst->min_val = min(dst->min_val, src->min_val);
	dst->max_val = max(dst->max_val, src->max_val);
	dst->samples += src->samples;

	/*
	 * Needs a new method for calculating stddev, we cannot just
	 * average them like we do below for nr > 1.
	 */
	if (nr == 1) {
		mean = src->mean;
		S = src->S;
	} else {
		mean = ((src->mean * (double) (nr - 1)) + dst->mean) / ((double) nr);
		S = ((src->S * (double) (nr - 1)) + dst->S) / ((double) nr);
	}

	dst->mean = mean;
	dst->S = S;
}

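/*
 * Main entry point for end-of-run reporting. Collapses the per-thread
 * stats into one thread_stat per job (or per group when group_reporting
 * is set), derives the per-group aggregate bandwidth numbers, and then
 * prints either the normal or the terse report plus disk utilization.
 */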
void show_run_stats(void)
{
	struct group_run_stats *runstats, *rs;
	struct thread_data *td;
	struct thread_stat *threadstats, *ts;
	int i, j, k, l, nr_ts, last_ts, idx;

	runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));

	for (i = 0; i < groupid + 1; i++) {
		rs = &runstats[i];

		memset(rs, 0, sizeof(*rs));
		rs->min_bw[0] = rs->min_run[0] = ~0UL;
		rs->min_bw[1] = rs->min_run[1] = ~0UL;
	}

	/*
	 * find out how many thread stats we need. if group reporting isn't
	 * enabled, it's one-per-td.
	 */
	nr_ts = 0;
	last_ts = -1;
	for_each_td(td, i) {
		if (!td->group_reporting) {
			nr_ts++;
			continue;
		}
		if (last_ts == td->groupid)
			continue;

		last_ts = td->groupid;
		nr_ts++;
	}

	threadstats = malloc(nr_ts * sizeof(struct thread_stat));

	for (i = 0; i < nr_ts; i++) {
		ts = &threadstats[i];

		memset(ts, 0, sizeof(*ts));
		for (j = 0; j <= DDIR_WRITE; j++) {
			ts->clat_stat[j].min_val = -1UL;
			ts->slat_stat[j].min_val = -1UL;
			ts->bw_stat[j].min_val = -1UL;
		}
	}

	j = 0;
	last_ts = -1;
	idx = 0;
	for_each_td(td, i) {
		ts = &threadstats[j];

		idx++;
		ts->members++;

		if (!ts->groupid) {
			/*
			 * These are per-group shared already
			 */
			ts->name = td->name;
			ts->description = td->description;
			ts->groupid = td->groupid;

			/*
			 * first pid in group, not very useful...
			 */
			ts->pid = td->pid;
		}

		if (td->error && !ts->error) {
			ts->error = td->error;
			ts->verror = td->verror;
		}

		for (l = 0; l <= DDIR_WRITE; l++) {
			sum_stat(&ts->clat_stat[l], &td->ts.clat_stat[l], idx);
			sum_stat(&ts->slat_stat[l], &td->ts.slat_stat[l], idx);
			sum_stat(&ts->bw_stat[l], &td->ts.bw_stat[l], idx);

			ts->stat_io_bytes[l] += td->ts.stat_io_bytes[l];
			ts->io_bytes[l] += td->ts.io_bytes[l];

			if (ts->runtime[l] < td->ts.runtime[l])
				ts->runtime[l] = td->ts.runtime[l];
		}

		ts->usr_time += td->ts.usr_time;
		ts->sys_time += td->ts.sys_time;
		ts->ctx += td->ts.ctx;

		for (k = 0; k < FIO_IO_U_MAP_NR; k++)
			ts->io_u_map[k] += td->ts.io_u_map[k];
		for (k = 0; k < FIO_IO_U_LAT_NR; k++)
			ts->io_u_lat[k] += td->ts.io_u_lat[k];

		for (k = 0; k <= DDIR_WRITE; k++)
			ts->total_io_u[k] += td->ts.total_io_u[k];

		ts->total_run_time += td->ts.total_run_time;

		if (!td->group_reporting) {
			idx = 0;
			j++;
			continue;
		}
		if (last_ts == td->groupid)
			continue;

		if (last_ts != -1) {
			idx = 0;
			j++;
		}

		last_ts = td->groupid;
	}

	for (i = 0; i < nr_ts; i++) {
		unsigned long long bw;

		ts = &threadstats[i];
		rs = &runstats[ts->groupid];

		for (j = 0; j <= DDIR_WRITE; j++) {
			if (!ts->runtime[j])
				continue;
			if (ts->runtime[j] < rs->min_run[j] || !rs->min_run[j])
				rs->min_run[j] = ts->runtime[j];
			if (ts->runtime[j] > rs->max_run[j])
				rs->max_run[j] = ts->runtime[j];

			bw = 0;
			if (ts->runtime[j])
				bw = ts->io_bytes[j] / (unsigned long long) ts->runtime[j];
			if (bw < rs->min_bw[j])
				rs->min_bw[j] = bw;
			if (bw > rs->max_bw[j])
				rs->max_bw[j] = bw;

			rs->io_kb[j] += ts->io_bytes[j] >> 10;
		}
	}

	for (i = 0; i < groupid + 1; i++) {
		rs = &runstats[i];

		if (rs->max_run[0])
			rs->agg[0] = (rs->io_kb[0]*1024) / rs->max_run[0];
		if (rs->max_run[1])
			rs->agg[1] = (rs->io_kb[1]*1024) / rs->max_run[1];
	}

	/*
	 * don't overwrite last signal output
	 */
	if (!terse_output)
		printf("\n");

	for (i = 0; i < nr_ts; i++) {
		ts = &threadstats[i];
		rs = &runstats[ts->groupid];

		if (terse_output)
			show_thread_status_terse(ts, rs);
		else
			show_thread_status(ts, rs);
	}

	if (!terse_output) {
		for (i = 0; i < groupid + 1; i++)
			show_group_stats(&runstats[i], i);

		show_disk_util();
	}

	free(runstats);
	free(threadstats);
}

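/*
 * Online update of min/max, mean and S (sum of squared deviations) for a
 * new sample, so mean and standard deviation can be computed later
 * without storing every value.
 */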
static inline void add_stat_sample(struct io_stat *is, unsigned long data)
{
	double val = data;
	double delta;

	if (data > is->max_val)
		is->max_val = data;
	if (data < is->min_val)
		is->min_val = data;

	delta = val - is->mean;
	is->mean += delta / (is->samples + 1.0);
	is->S += delta * (val - is->mean);

	is->samples++;
}

static void __add_log_sample(struct io_log *iolog, unsigned long val,
			     enum fio_ddir ddir, unsigned long time)
{
	if (iolog->nr_samples == iolog->max_samples) {
		int new_size = sizeof(struct io_sample) * iolog->max_samples*2;

		iolog->log = realloc(iolog->log, new_size);
		iolog->max_samples <<= 1;
	}

	iolog->log[iolog->nr_samples].val = val;
	iolog->log[iolog->nr_samples].time = time;
	iolog->log[iolog->nr_samples].ddir = ddir;
	iolog->nr_samples++;
}

static void add_log_sample(struct thread_data *td, struct io_log *iolog,
			   unsigned long val, enum fio_ddir ddir)
{
	__add_log_sample(iolog, val, ddir, mtime_since_now(&td->epoch));
}

void add_agg_sample(unsigned long val, enum fio_ddir ddir)
{
	struct io_log *iolog = agg_io_log[ddir];

	__add_log_sample(iolog, val, ddir, mtime_since_genesis());
}

void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
		     unsigned long msec)
{
	struct thread_stat *ts = &td->ts;

	add_stat_sample(&ts->clat_stat[ddir], msec);

	if (ts->clat_log)
		add_log_sample(td, ts->clat_log, msec, ddir);
}

void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
		     unsigned long msec)
{
	struct thread_stat *ts = &td->ts;

	add_stat_sample(&ts->slat_stat[ddir], msec);

	if (ts->slat_log)
		add_log_sample(td, ts->slat_log, msec, ddir);
}

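/*
 * Record a bandwidth sample: once at least bw_avg_time msec have passed
 * since the last sample, compute the rate over that window and reset the
 * window start.
 */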
void add_bw_sample(struct thread_data *td, enum fio_ddir ddir,
		   struct timeval *t)
{
	struct thread_stat *ts = &td->ts;
	unsigned long spent = mtime_since(&ts->stat_sample_time[ddir], t);
	unsigned long rate;

	if (spent < td->bw_avg_time)
		return;

	rate = (td->this_io_bytes[ddir] - ts->stat_io_bytes[ddir]) / spent;
	add_stat_sample(&ts->bw_stat[ddir], rate);

	if (ts->bw_log)
		add_log_sample(td, ts->bw_log, rate, ddir);

	fio_gettime(&ts->stat_sample_time[ddir], NULL);
	ts->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
}