[PATCH] disk_util: Allow an engine to turn off disk_util specifically
[fio.git] / stat.c
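
The FIO_NODISKUTIL ioengine flag is honoured by the check at the top of init_disk_util() below, alongside the existing FIO_DISKLESSIO. As a rough sketch (the engine name and the omitted callbacks are illustrative, not part of this patch), an engine that performs real I/O but has no meaningful local block device to account for would opt out like this:

static struct ioengine_ops ioengine = {
        .name           = "example_engine",     /* hypothetical name */
        .version        = FIO_IOOPS_VERSION,
        .flags          = FIO_NODISKUTIL,       /* do the I/O, skip disk stats */
        /* .init, .queue, etc. omitted */
};
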
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <libgen.h>
#include <math.h>

#include "fio.h"

static struct itimerval itimer;
static struct list_head disk_list = LIST_HEAD_INIT(disk_list);
static dev_t last_dev;

/*
 * Cheesy number->string conversion, complete with carry rounding error.
 */
static char *num2str(unsigned long num, int maxlen, int base, int pow2)
{
        char postfix[] = { ' ', 'K', 'M', 'G', 'T', 'P', 'E' };
        unsigned int thousand;
        char *buf;
        int i;

        if (pow2)
                thousand = 1024;
        else
                thousand = 1000;

        buf = malloc(128);

        for (i = 0; base > 1; i++)
                base /= thousand;

        do {
                int len, carry = 0;

                len = sprintf(buf, "%'lu", num);
                if (len <= maxlen) {
                        if (i >= 1) {
                                buf[len] = postfix[i];
                                buf[len + 1] = '\0';
                        }
                        return buf;
                }

                if ((num % thousand) >= (thousand / 2))
                        carry = 1;

                num /= thousand;
                num += carry;
                i++;
        } while (i <= 6);

        return buf;
}

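/*
 * du->path points at the "stat" file of a block device in sysfs
 * (/sys/block/<dev>/stat). Its single line carries eleven fields, in this
 * order: read ios, read merges, read sectors, read ticks, write ios,
 * write merges, write sectors, write ticks, in_flight, io_ticks (time
 * spent doing I/O) and time_in_queue, which is what the sscanf() below
 * expects.
 */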
static int get_io_ticks(struct disk_util *du, struct disk_util_stat *dus)
{
        unsigned in_flight;
        char line[256];
        FILE *f;
        char *p;

        f = fopen(du->path, "r");
        if (!f)
                return 1;

        p = fgets(line, sizeof(line), f);
        if (!p) {
                fclose(f);
                return 1;
        }

        if (sscanf(p, "%u %u %llu %u %u %u %llu %u %u %u %u\n", &dus->ios[0], &dus->merges[0], &dus->sectors[0], &dus->ticks[0], &dus->ios[1], &dus->merges[1], &dus->sectors[1], &dus->ticks[1], &in_flight, &dus->io_ticks, &dus->time_in_queue) != 11) {
                fclose(f);
                return 1;
        }

        fclose(f);
        return 0;
}

static void update_io_tick_disk(struct disk_util *du)
{
        struct disk_util_stat __dus, *dus, *ldus;
        struct timeval t;

        if (get_io_ticks(du, &__dus))
                return;

        dus = &du->dus;
        ldus = &du->last_dus;

        dus->sectors[0] += (__dus.sectors[0] - ldus->sectors[0]);
        dus->sectors[1] += (__dus.sectors[1] - ldus->sectors[1]);
        dus->ios[0] += (__dus.ios[0] - ldus->ios[0]);
        dus->ios[1] += (__dus.ios[1] - ldus->ios[1]);
        dus->merges[0] += (__dus.merges[0] - ldus->merges[0]);
        dus->merges[1] += (__dus.merges[1] - ldus->merges[1]);
        dus->ticks[0] += (__dus.ticks[0] - ldus->ticks[0]);
        dus->ticks[1] += (__dus.ticks[1] - ldus->ticks[1]);
        dus->io_ticks += (__dus.io_ticks - ldus->io_ticks);
        dus->time_in_queue += (__dus.time_in_queue - ldus->time_in_queue);

        fio_gettime(&t, NULL);
        du->msec += mtime_since(&du->time, &t);
        memcpy(&du->time, &t, sizeof(t));
        memcpy(ldus, &__dus, sizeof(__dus));
}

void update_io_ticks(void)
{
        struct list_head *entry;
        struct disk_util *du;

        list_for_each(entry, &disk_list) {
                du = list_entry(entry, struct disk_util, list);
                update_io_tick_disk(du);
        }
}

static int disk_util_exists(dev_t dev)
{
        struct list_head *entry;
        struct disk_util *du;

        list_for_each(entry, &disk_list) {
                du = list_entry(entry, struct disk_util, list);

                if (du->dev == dev)
                        return 1;
        }

        return 0;
}

static void disk_util_add(dev_t dev, char *path)
{
        struct disk_util *du, *__du;
        struct list_head *entry;

        du = malloc(sizeof(*du));
        memset(du, 0, sizeof(*du));
        INIT_LIST_HEAD(&du->list);
        sprintf(du->path, "%s/stat", path);
        du->name = strdup(basename(path));
        du->dev = dev;

        list_for_each(entry, &disk_list) {
                __du = list_entry(entry, struct disk_util, list);

                if (!strcmp(du->name, __du->name)) {
                        free(du->name);
                        free(du);
                        return;
                }
        }

        fio_gettime(&du->time, NULL);
        get_io_ticks(du, &du->last_dus);

        list_add_tail(&du->list, &disk_list);
}

static int check_dev_match(dev_t dev, char *path)
{
        unsigned int major, minor;
        char line[256], *p;
        FILE *f;

        f = fopen(path, "r");
        if (!f) {
                perror("open path");
                return 1;
        }

        p = fgets(line, sizeof(line), f);
        if (!p) {
                fclose(f);
                return 1;
        }

        if (sscanf(p, "%u:%u", &major, &minor) != 2) {
                fclose(f);
                return 1;
        }

        if (((major << 8) | minor) == dev) {
                fclose(f);
                return 0;
        }

        fclose(f);
        return 1;
}

static int find_block_dir(dev_t dev, char *path)
{
        struct dirent *dir;
        struct stat st;
        int found = 0;
        DIR *D;

        D = opendir(path);
        if (!D)
                return 0;

        while ((dir = readdir(D)) != NULL) {
                char full_path[256];

                if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
                        continue;

                sprintf(full_path, "%s/%s", path, dir->d_name);

                if (!strcmp(dir->d_name, "dev")) {
                        if (!check_dev_match(dev, full_path)) {
                                found = 1;
                                break;
                        }
                }

                if (lstat(full_path, &st) == -1) {
                        perror("stat");
                        break;
                }

                if (!S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode))
                        continue;

                found = find_block_dir(dev, full_path);
                if (found) {
                        strcpy(path, full_path);
                        break;
                }
        }

        closedir(D);
        return found;
}

void init_disk_util(struct thread_data *td)
{
        struct fio_file *f;
        struct stat st;
        char foo[PATH_MAX], tmp[PATH_MAX];
        dev_t dev;
        char *p;

        if (!td->do_disk_util ||
            (td->io_ops->flags & (FIO_DISKLESSIO | FIO_NODISKUTIL)))
                return;

        /*
         * Just use the first file; they are all on the same device.
         */
        f = &td->files[0];
        if (!stat(f->file_name, &st)) {
                if (S_ISBLK(st.st_mode))
                        dev = st.st_rdev;
                else
                        dev = st.st_dev;
        } else {
                /*
                 * Must be a file; stat the directory it lives in instead.
                 */
                strncpy(foo, f->file_name, PATH_MAX - 1);
                p = dirname(foo);
                if (stat(p, &st)) {
                        perror("disk util stat");
                        return;
                }

                dev = st.st_dev;
        }

        if (disk_util_exists(dev))
                return;

        /*
         * For a filesystem without a device, we would repeatedly stat
         * through sysfs, which can take oodles of time for thousands of
         * files. So cache the last lookup and compare with that before
         * going through everything again.
         */
        if (dev == last_dev)
                return;

        last_dev = dev;

        sprintf(foo, "/sys/block");
        if (!find_block_dir(dev, foo))
                return;

        /*
         * If there's a ../queue/ directory there, we are inside a partition.
         * Check if that is the case and jump back. For loop/md/dm etc. we
         * are already in the right spot.
         */
        sprintf(tmp, "%s/../queue", foo);
        if (!stat(tmp, &st)) {
                p = dirname(foo);
                sprintf(tmp, "%s/queue", p);
                if (stat(tmp, &st)) {
                        log_err("unknown sysfs layout\n");
                        return;
                }
                strncpy(tmp, p, PATH_MAX - 1);
                sprintf(foo, "%s", tmp);
        }

        if (td->ioscheduler)
                td->sysfs_root = strdup(foo);

        disk_util_add(dev, foo);
}

void disk_util_timer_arm(void)
{
        itimer.it_value.tv_sec = 0;
        itimer.it_value.tv_usec = DISK_UTIL_MSEC * 1000;
        setitimer(ITIMER_REAL, &itimer, NULL);
}
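
/*
 * Note that itimer is static, so it_interval stays zero and the above arms
 * a one-shot ITIMER_REAL expiring after DISK_UTIL_MSEC milliseconds. The
 * SIGALRM handler (outside this file) is expected to sample the disks via
 * update_io_ticks() and then re-arm the timer to keep sampling going.
 */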

void update_rusage_stat(struct thread_data *td)
{
        struct thread_stat *ts = &td->ts;

        getrusage(RUSAGE_SELF, &ts->ru_end);

        ts->usr_time += mtime_since(&ts->ru_start.ru_utime, &ts->ru_end.ru_utime);
        ts->sys_time += mtime_since(&ts->ru_start.ru_stime, &ts->ru_end.ru_stime);
        ts->ctx += ts->ru_end.ru_nvcsw + ts->ru_end.ru_nivcsw - (ts->ru_start.ru_nvcsw + ts->ru_start.ru_nivcsw);

        memcpy(&ts->ru_start, &ts->ru_end, sizeof(ts->ru_end));
}

static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
                double *mean, double *dev)
{
        double n = is->samples;

        if (is->samples == 0)
                return 0;

        *min = is->min_val;
        *max = is->max_val;

        n = (double) is->samples;
        *mean = is->mean;

        if (n > 1.0)
                *dev = sqrt(is->S / (n - 1.0));
        else
                *dev = -1.0;

        return 1;
}

static void show_group_stats(struct group_run_stats *rs, int id)
{
        char *p1, *p2, *p3, *p4;
        const char *ddir_str[] = { " READ", " WRITE" };
        int i;

        fprintf(f_out, "\nRun status group %d (all jobs):\n", id);

        for (i = 0; i <= DDIR_WRITE; i++) {
                if (!rs->max_run[i])
                        continue;

                p1 = num2str(rs->io_kb[i], 6, 1000, 1);
                p2 = num2str(rs->agg[i], 6, 1000, 1);
                p3 = num2str(rs->min_bw[i], 6, 1000, 1);
                p4 = num2str(rs->max_bw[i], 6, 1000, 1);

                fprintf(f_out, "%s: io=%siB, aggrb=%siB/s, minb=%siB/s, maxb=%siB/s, mint=%llumsec, maxt=%llumsec\n", ddir_str[i], p1, p2, p3, p4, rs->min_run[i], rs->max_run[i]);

                free(p1);
                free(p2);
                free(p3);
                free(p4);
        }
}

static void show_disk_util(void)
{
        struct disk_util_stat *dus;
        struct list_head *entry, *next;
        struct disk_util *du;
        double util;

        fprintf(f_out, "\nDisk stats (read/write):\n");

        list_for_each(entry, &disk_list) {
                du = list_entry(entry, struct disk_util, list);
                dus = &du->dus;

                util = (double) 100 * du->dus.io_ticks / (double) du->msec;
                if (util > 100.0)
                        util = 100.0;

                fprintf(f_out, " %s: ios=%u/%u, merge=%u/%u, ticks=%u/%u, in_queue=%u, util=%3.2f%%\n", du->name, dus->ios[0], dus->ios[1], dus->merges[0], dus->merges[1], dus->ticks[0], dus->ticks[1], dus->time_in_queue, util);
        }

        /*
         * now free the list
         */
        list_for_each_safe(entry, next, &disk_list) {
                list_del(entry);
                du = list_entry(entry, struct disk_util, list);
                free(du->name);
                free(du);
        }
}

#define ts_total_io_u(ts) \
        ((ts)->total_io_u[0] + (ts)->total_io_u[1])

static void stat_calc_dist(struct thread_stat *ts, double *io_u_dist)
{
        int i;

        /*
         * Do depth distribution calculations
         */
        for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
                io_u_dist[i] = (double) ts->io_u_map[i] / (double) ts_total_io_u(ts);
                io_u_dist[i] *= 100.0;
        }
}

static void stat_calc_lat(struct thread_stat *ts, double *io_u_lat)
{
        int i;

        /*
         * Do latency distribution calculations
         */
        for (i = 0; i < FIO_IO_U_LAT_NR; i++) {
                io_u_lat[i] = (double) ts->io_u_lat[i] / (double) ts_total_io_u(ts);
                io_u_lat[i] *= 100.0;
        }
}

static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
                int ddir)
{
        const char *ddir_str[] = { "read ", "write" };
        unsigned long min, max;
        unsigned long long bw, iops;
        double mean, dev;
        char *io_p, *bw_p, *iops_p;

        if (!ts->runtime[ddir])
                return;

        bw = ts->io_bytes[ddir] / ts->runtime[ddir];
        iops = (1000 * ts->total_io_u[ddir]) / ts->runtime[ddir];
        io_p = num2str(ts->io_bytes[ddir] >> 10, 6, 1000, 1);
        bw_p = num2str(bw, 6, 1000, 1);
        iops_p = num2str(iops, 6, 1, 0);

        fprintf(f_out, " %s: io=%siB, bw=%siB/s, iops=%s, runt=%6lumsec\n", ddir_str[ddir], io_p, bw_p, iops_p, ts->runtime[ddir]);

        free(io_p);
        free(bw_p);
        free(iops_p);

        if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
                fprintf(f_out, " slat (msec): min=%5lu, max=%5lu, avg=%5.02f, stdev=%5.02f\n", min, max, mean, dev);

        if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
                fprintf(f_out, " clat (msec): min=%5lu, max=%5lu, avg=%5.02f, stdev=%5.02f\n", min, max, mean, dev);

        if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
                double p_of_agg;

                p_of_agg = mean * 100 / (double) rs->agg[ddir];
                fprintf(f_out, " bw (KiB/s) : min=%5lu, max=%5lu, per=%3.2f%%, avg=%5.02f, stdev=%5.02f\n", min, max, p_of_agg, mean, dev);
        }
}

static void show_thread_status(struct thread_stat *ts,
                struct group_run_stats *rs)
{
        double usr_cpu, sys_cpu;
        unsigned long runtime;
        double io_u_dist[FIO_IO_U_MAP_NR];
        double io_u_lat[FIO_IO_U_LAT_NR];

        if (!(ts->io_bytes[0] + ts->io_bytes[1]))
                return;

        if (!ts->error)
                fprintf(f_out, "%s: (groupid=%d, jobs=%d): err=%2d: pid=%d\n", ts->name, ts->groupid, ts->members, ts->error, ts->pid);
        else
                fprintf(f_out, "%s: (groupid=%d, jobs=%d): err=%2d (%s): pid=%d\n", ts->name, ts->groupid, ts->members, ts->error, ts->verror, ts->pid);

        if (ts->description)
                fprintf(f_out, " Description : [%s]\n", ts->description);

        if (ts->io_bytes[DDIR_READ])
                show_ddir_status(rs, ts, DDIR_READ);
        if (ts->io_bytes[DDIR_WRITE])
                show_ddir_status(rs, ts, DDIR_WRITE);

        runtime = ts->total_run_time;
        if (runtime) {
                double runt = (double) runtime;

                usr_cpu = (double) ts->usr_time * 100 / runt;
                sys_cpu = (double) ts->sys_time * 100 / runt;
        } else {
                usr_cpu = 0;
                sys_cpu = 0;
        }

        fprintf(f_out, " cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, ts->ctx);

        stat_calc_dist(ts, io_u_dist);
        stat_calc_lat(ts, io_u_lat);

        fprintf(f_out, " IO depths : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4], io_u_dist[5], io_u_dist[6]);

        fprintf(f_out, " lat (msec): 2=%3.1f%%, 4=%3.1f%%, 10=%3.1f%%, 20=%3.1f%%, 50=%3.1f%%, 100=%3.1f%%\n", io_u_lat[0], io_u_lat[1], io_u_lat[2], io_u_lat[3], io_u_lat[4], io_u_lat[5]);
        fprintf(f_out, " lat (msec): 250=%3.1f%%, 500=%3.1f%%, 750=%3.1f%%, 1000=%3.1f%%, >=2000=%3.1f%%\n", io_u_lat[6], io_u_lat[7], io_u_lat[8], io_u_lat[9], io_u_lat[10]);
}

static void show_ddir_status_terse(struct thread_stat *ts,
                struct group_run_stats *rs, int ddir)
{
        unsigned long min, max;
        unsigned long long bw;
        double mean, dev;

        bw = 0;
        if (ts->runtime[ddir])
                bw = ts->io_bytes[ddir] / ts->runtime[ddir];

        fprintf(f_out, ";%llu;%llu;%lu", ts->io_bytes[ddir] >> 10, bw, ts->runtime[ddir]);

        if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
                fprintf(f_out, ";%lu;%lu;%f;%f", min, max, mean, dev);
        else
                fprintf(f_out, ";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

        if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
                fprintf(f_out, ";%lu;%lu;%f;%f", min, max, mean, dev);
        else
                fprintf(f_out, ";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

        if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
                double p_of_agg;

                p_of_agg = mean * 100 / (double) rs->agg[ddir];
                fprintf(f_out, ";%lu;%lu;%f%%;%f;%f", min, max, p_of_agg, mean, dev);
        } else
                fprintf(f_out, ";%lu;%lu;%f%%;%f;%f", 0UL, 0UL, 0.0, 0.0, 0.0);
}


static void show_thread_status_terse(struct thread_stat *ts,
                struct group_run_stats *rs)
{
        double io_u_dist[FIO_IO_U_MAP_NR];
        double io_u_lat[FIO_IO_U_LAT_NR];
        double usr_cpu, sys_cpu;

        fprintf(f_out, "%s;%d;%d", ts->name, ts->groupid, ts->error);

        show_ddir_status_terse(ts, rs, 0);
        show_ddir_status_terse(ts, rs, 1);

        if (ts->total_run_time) {
                double runt = (double) ts->total_run_time;

                usr_cpu = (double) ts->usr_time * 100 / runt;
                sys_cpu = (double) ts->sys_time * 100 / runt;
        } else {
                usr_cpu = 0;
                sys_cpu = 0;
        }

        fprintf(f_out, ";%f%%;%f%%;%lu", usr_cpu, sys_cpu, ts->ctx);

        stat_calc_dist(ts, io_u_dist);
        stat_calc_lat(ts, io_u_lat);

        fprintf(f_out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%", io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4], io_u_dist[5], io_u_dist[6]);

        fprintf(f_out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%", io_u_lat[0], io_u_lat[1], io_u_lat[2], io_u_lat[3], io_u_lat[4], io_u_lat[5]);
        fprintf(f_out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%", io_u_lat[6], io_u_lat[7], io_u_lat[8], io_u_lat[9], io_u_lat[10]);

        if (ts->description)
                fprintf(f_out, ";%s", ts->description);

        fprintf(f_out, "\n");
}

static void sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
{
        double mean, S;

        dst->min_val = min(dst->min_val, src->min_val);
        dst->max_val = max(dst->max_val, src->max_val);
        dst->samples += src->samples;

        /*
         * Needs a new method for calculating stddev; we cannot just
         * average them as we do below for nr > 1.
         */
        if (nr == 1) {
                mean = src->mean;
                S = src->S;
        } else {
                mean = ((src->mean * (double) (nr - 1)) + dst->mean) / ((double) nr);
                S = ((src->S * (double) (nr - 1)) + dst->S) / ((double) nr);
        }

        dst->mean = mean;
        dst->S = S;
}
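
/*
 * For reference: an exact merge of two accumulators (n1, mean1, S1) and
 * (n2, mean2, S2) would be
 *
 *      delta = mean2 - mean1
 *      mean  = mean1 + delta * n2 / (n1 + n2)
 *      S     = S1 + S2 + delta * delta * n1 * n2 / (n1 + n2)
 *
 * The averaging done in sum_stat() above is only an approximation, as its
 * comment notes.
 */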

void show_run_stats(void)
{
        struct group_run_stats *runstats, *rs;
        struct thread_data *td;
        struct thread_stat *threadstats, *ts;
        int i, j, k, l, nr_ts, last_ts, idx;

        runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));

        for (i = 0; i < groupid + 1; i++) {
                rs = &runstats[i];

                memset(rs, 0, sizeof(*rs));
                rs->min_bw[0] = rs->min_run[0] = ~0UL;
                rs->min_bw[1] = rs->min_run[1] = ~0UL;
        }

        /*
         * Find out how many thread stats we need. If group reporting isn't
         * enabled, it's one per td.
         */
        nr_ts = 0;
        last_ts = -1;
        for_each_td(td, i) {
                if (!td->group_reporting) {
                        nr_ts++;
                        continue;
                }
                if (last_ts == td->groupid)
                        continue;

                last_ts = td->groupid;
                nr_ts++;
        }

        threadstats = malloc(nr_ts * sizeof(struct thread_stat));

        for (i = 0; i < nr_ts; i++) {
                ts = &threadstats[i];

                memset(ts, 0, sizeof(*ts));
                for (j = 0; j <= DDIR_WRITE; j++) {
                        ts->clat_stat[j].min_val = -1UL;
                        ts->slat_stat[j].min_val = -1UL;
                        ts->bw_stat[j].min_val = -1UL;
                }
                ts->groupid = -1;
        }

        j = 0;
        last_ts = -1;
        idx = 0;
        for_each_td(td, i) {
                if (idx && (!td->group_reporting ||
                    (td->group_reporting && last_ts != td->groupid))) {
                        idx = 0;
                        j++;
                }

                last_ts = td->groupid;

                ts = &threadstats[j];

                idx++;
                ts->members++;

                if (ts->groupid == -1) {
                        /*
                         * These are per-group shared already
                         */
                        ts->name = td->name;
                        ts->description = td->description;
                        ts->groupid = td->groupid;

                        /*
                         * first pid in group, not very useful...
                         */
                        ts->pid = td->pid;
                }

                if (td->error && !ts->error) {
                        ts->error = td->error;
                        ts->verror = td->verror;
                }

                for (l = 0; l <= DDIR_WRITE; l++) {
                        sum_stat(&ts->clat_stat[l], &td->ts.clat_stat[l], idx);
                        sum_stat(&ts->slat_stat[l], &td->ts.slat_stat[l], idx);
                        sum_stat(&ts->bw_stat[l], &td->ts.bw_stat[l], idx);

                        ts->stat_io_bytes[l] += td->ts.stat_io_bytes[l];
                        ts->io_bytes[l] += td->ts.io_bytes[l];

                        if (ts->runtime[l] < td->ts.runtime[l])
                                ts->runtime[l] = td->ts.runtime[l];
                }

                ts->usr_time += td->ts.usr_time;
                ts->sys_time += td->ts.sys_time;
                ts->ctx += td->ts.ctx;

                for (k = 0; k < FIO_IO_U_MAP_NR; k++)
                        ts->io_u_map[k] += td->ts.io_u_map[k];
                for (k = 0; k < FIO_IO_U_LAT_NR; k++)
                        ts->io_u_lat[k] += td->ts.io_u_lat[k];

                for (k = 0; k <= DDIR_WRITE; k++)
                        ts->total_io_u[k] += td->ts.total_io_u[k];

                ts->total_run_time += td->ts.total_run_time;
        }

        for (i = 0; i < nr_ts; i++) {
                unsigned long long bw;

                ts = &threadstats[i];
                rs = &runstats[ts->groupid];

                for (j = 0; j <= DDIR_WRITE; j++) {
                        if (!ts->runtime[j])
                                continue;
                        if (ts->runtime[j] < rs->min_run[j] || !rs->min_run[j])
                                rs->min_run[j] = ts->runtime[j];
                        if (ts->runtime[j] > rs->max_run[j])
                                rs->max_run[j] = ts->runtime[j];

                        bw = 0;
                        if (ts->runtime[j])
                                bw = ts->io_bytes[j] / (unsigned long long) ts->runtime[j];
                        if (bw < rs->min_bw[j])
                                rs->min_bw[j] = bw;
                        if (bw > rs->max_bw[j])
                                rs->max_bw[j] = bw;

                        rs->io_kb[j] += ts->io_bytes[j] >> 10;
                }
        }

        for (i = 0; i < groupid + 1; i++) {
                rs = &runstats[i];

                if (rs->max_run[0])
                        rs->agg[0] = (rs->io_kb[0]*1024) / rs->max_run[0];
                if (rs->max_run[1])
                        rs->agg[1] = (rs->io_kb[1]*1024) / rs->max_run[1];
        }

        /*
         * don't overwrite last signal output
         */
        if (!terse_output)
                printf("\n");

        for (i = 0; i < nr_ts; i++) {
                ts = &threadstats[i];
                rs = &runstats[ts->groupid];

                if (terse_output)
                        show_thread_status_terse(ts, rs);
                else
                        show_thread_status(ts, rs);
        }

        if (!terse_output) {
                for (i = 0; i < groupid + 1; i++)
                        show_group_stats(&runstats[i], i);

                show_disk_util();
        }

        free(runstats);
        free(threadstats);
}

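/*
 * add_stat_sample() below keeps a running mean and sum of squared
 * deviations (Welford's method): with delta = x - mean_old,
 *
 *      mean_new = mean_old + delta / n
 *      S_new    = S_old + delta * (x - mean_new)
 *
 * so the sample variance is S / (n - 1), and calc_lat() reports its square
 * root as the standard deviation.
 */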
static inline void add_stat_sample(struct io_stat *is, unsigned long data)
{
        double val = data;
        double delta;

        if (data > is->max_val)
                is->max_val = data;
        if (data < is->min_val)
                is->min_val = data;

        delta = val - is->mean;
        is->mean += delta / (is->samples + 1.0);
        is->S += delta * (val - is->mean);

        is->samples++;
}

static void __add_log_sample(struct io_log *iolog, unsigned long val,
                enum fio_ddir ddir, unsigned long time)
{
        if (iolog->nr_samples == iolog->max_samples) {
                int new_size = sizeof(struct io_sample) * iolog->max_samples*2;

                iolog->log = realloc(iolog->log, new_size);
                iolog->max_samples <<= 1;
        }

        iolog->log[iolog->nr_samples].val = val;
        iolog->log[iolog->nr_samples].time = time;
        iolog->log[iolog->nr_samples].ddir = ddir;
        iolog->nr_samples++;
}

static void add_log_sample(struct thread_data *td, struct io_log *iolog,
                unsigned long val, enum fio_ddir ddir)
{
        __add_log_sample(iolog, val, ddir, mtime_since_now(&td->epoch));
}

void add_agg_sample(unsigned long val, enum fio_ddir ddir)
{
        struct io_log *iolog = agg_io_log[ddir];

        __add_log_sample(iolog, val, ddir, mtime_since_genesis());
}

void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
                unsigned long msec)
{
        struct thread_stat *ts = &td->ts;

        add_stat_sample(&ts->clat_stat[ddir], msec);

        if (ts->clat_log)
                add_log_sample(td, ts->clat_log, msec, ddir);
}

void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
                unsigned long msec)
{
        struct thread_stat *ts = &td->ts;

        add_stat_sample(&ts->slat_stat[ddir], msec);

        if (ts->slat_log)
                add_log_sample(td, ts->slat_log, msec, ddir);
}

void add_bw_sample(struct thread_data *td, enum fio_ddir ddir,
                struct timeval *t)
{
        struct thread_stat *ts = &td->ts;
        unsigned long spent = mtime_since(&ts->stat_sample_time[ddir], t);
        unsigned long rate;

        if (spent < td->bw_avg_time)
                return;

        rate = (td->this_io_bytes[ddir] - ts->stat_io_bytes[ddir]) / spent;
        add_stat_sample(&ts->bw_stat[ddir], rate);

        if (ts->bw_log)
                add_log_sample(td, ts->bw_log, rate, ddir);

        fio_gettime(&ts->stat_sample_time[ddir], NULL);
        ts->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
}