Separate disk util code out of fio.c
[fio.git] / stat.c
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <libgen.h>
#include <math.h>

#include "fio.h"

/*
 * Cheesy number->string conversion, complete with carry rounding error.
 */
static char *num2str(unsigned long num, int maxlen, int base, int pow2)
{
	char postfix[] = { ' ', 'K', 'M', 'G', 'P', 'E' };
	unsigned int thousand;
	char *buf;
	int i;

	if (pow2)
		thousand = 1024;
	else
		thousand = 1000;

	buf = malloc(128);

	for (i = 0; base > 1; i++)
		base /= thousand;

	do {
		int len, carry = 0;

		len = sprintf(buf, "%'lu", num);
		if (len <= maxlen) {
			if (i >= 1) {
				buf[len] = postfix[i];
				buf[len + 1] = '\0';
			}
			return buf;
		}

		if ((num % thousand) >= (thousand / 2))
			carry = 1;

		num /= thousand;
		num += carry;
		i++;
	} while (i <= 5);

	return buf;
}

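/*
 * Accumulate user/system CPU time and context switch counts since the
 * previous call, then reset the start snapshot to the current rusage.
 */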
void update_rusage_stat(struct thread_data *td)
{
	struct thread_stat *ts = &td->ts;

	getrusage(RUSAGE_SELF, &ts->ru_end);

	ts->usr_time += mtime_since(&ts->ru_start.ru_utime, &ts->ru_end.ru_utime);
	ts->sys_time += mtime_since(&ts->ru_start.ru_stime, &ts->ru_end.ru_stime);
	ts->ctx += ts->ru_end.ru_nvcsw + ts->ru_end.ru_nivcsw - (ts->ru_start.ru_nvcsw + ts->ru_start.ru_nivcsw);

	memcpy(&ts->ru_start, &ts->ru_end, sizeof(ts->ru_end));
}

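/*
 * Report min/max/mean for a set of samples. The standard deviation is
 * derived from the running sum of squared differences (S) using the
 * sample (n - 1) form. Returns 0 if no samples were collected.
 */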
static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
		    double *mean, double *dev)
{
	double n = is->samples;

	if (is->samples == 0)
		return 0;

	*min = is->min_val;
	*max = is->max_val;

	n = (double) is->samples;
	*mean = is->mean;

	if (n > 1.0)
		*dev = sqrt(is->S / (n - 1.0));
	else
		*dev = -1.0;

	return 1;
}

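/*
 * Print the aggregate io/bandwidth/runtime summary for one group id,
 * covering both the read and write directions.
 */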
static void show_group_stats(struct group_run_stats *rs, int id)
{
	char *p1, *p2, *p3, *p4;
	const char *ddir_str[] = { " READ", " WRITE" };
	int i;

	log_info("\nRun status group %d (all jobs):\n", id);

	for (i = 0; i <= DDIR_WRITE; i++) {
		if (!rs->max_run[i])
			continue;

		p1 = num2str(rs->io_kb[i], 6, 1000, 1);
		p2 = num2str(rs->agg[i], 6, 1000, 1);
		p3 = num2str(rs->min_bw[i], 6, 1000, 1);
		p4 = num2str(rs->max_bw[i], 6, 1000, 1);

		log_info("%s: io=%siB, aggrb=%siB/s, minb=%siB/s, maxb=%siB/s, mint=%llumsec, maxt=%llumsec\n", ddir_str[i], p1, p2, p3, p4, rs->min_run[i], rs->max_run[i]);

		free(p1);
		free(p2);
		free(p3);
		free(p4);
	}
}

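/*
 * Total number of I/O units issued, summed over both data directions.
 */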
#define ts_total_io_u(ts)	\
	((ts)->total_io_u[0] + (ts)->total_io_u[1])

static void stat_calc_dist(struct thread_stat *ts, double *io_u_dist)
{
	int i;

	/*
	 * Do depth distribution calculations
	 */
	for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
		io_u_dist[i] = (double) ts->io_u_map[i] / (double) ts_total_io_u(ts);
		io_u_dist[i] *= 100.0;
		if (io_u_dist[i] < 0.1 && ts->io_u_map[i])
			io_u_dist[i] = 0.1;
	}
}

static void stat_calc_lat(struct thread_stat *ts, double *io_u_lat)
{
	int i;

	/*
	 * Do latency distribution calculations
	 */
	for (i = 0; i < FIO_IO_U_LAT_NR; i++) {
		io_u_lat[i] = (double) ts->io_u_lat[i] / (double) ts_total_io_u(ts);
		io_u_lat[i] *= 100.0;
		if (io_u_lat[i] < 0.01 && ts->io_u_lat[i])
			io_u_lat[i] = 0.01;
	}
}

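/*
 * Per-direction human readable output: total io, bandwidth, IOPS and
 * runtime, plus submission/completion latency and bandwidth
 * min/max/avg/stdev where samples exist.
 */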
static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
			     int ddir)
{
	const char *ddir_str[] = { "read ", "write" };
	unsigned long min, max;
	unsigned long long bw, iops;
	double mean, dev;
	char *io_p, *bw_p, *iops_p;

	if (!ts->runtime[ddir])
		return;

	bw = ts->io_bytes[ddir] / ts->runtime[ddir];
	iops = (1000 * ts->total_io_u[ddir]) / ts->runtime[ddir];
	io_p = num2str(ts->io_bytes[ddir] >> 10, 6, 1000, 1);
	bw_p = num2str(bw, 6, 1000, 1);
	iops_p = num2str(iops, 6, 1, 0);

	log_info(" %s: io=%siB, bw=%siB/s, iops=%s, runt=%6lumsec\n", ddir_str[ddir], io_p, bw_p, iops_p, ts->runtime[ddir]);

	free(io_p);
	free(bw_p);
	free(iops_p);

	if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
		log_info(" slat (msec): min=%5lu, max=%5lu, avg=%5.02f, stdev=%5.02f\n", min, max, mean, dev);

	if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
		log_info(" clat (msec): min=%5lu, max=%5lu, avg=%5.02f, stdev=%5.02f\n", min, max, mean, dev);

	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		double p_of_agg;

		p_of_agg = mean * 100 / (double) rs->agg[ddir];
		log_info(" bw (KiB/s) : min=%5lu, max=%5lu, per=%3.2f%%, avg=%5.02f, stdev=%5.02f\n", min, max, p_of_agg, mean, dev);
	}
}

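/*
 * Human readable per-job (or per-group, with group_reporting) summary:
 * identification line, per-direction stats, CPU usage, I/O depth
 * distribution and completion latency buckets.
 */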
static void show_thread_status(struct thread_stat *ts,
			       struct group_run_stats *rs)
{
	double usr_cpu, sys_cpu;
	unsigned long runtime;
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat[FIO_IO_U_LAT_NR];

	if (!(ts->io_bytes[0] + ts->io_bytes[1]))
		return;

	if (!ts->error)
		log_info("%s: (groupid=%d, jobs=%d): err=%2d: pid=%d\n", ts->name, ts->groupid, ts->members, ts->error, ts->pid);
	else
		log_info("%s: (groupid=%d, jobs=%d): err=%2d (%s): pid=%d\n", ts->name, ts->groupid, ts->members, ts->error, ts->verror, ts->pid);

	if (ts->description)
		log_info(" Description : [%s]\n", ts->description);

	if (ts->io_bytes[DDIR_READ])
		show_ddir_status(rs, ts, DDIR_READ);
	if (ts->io_bytes[DDIR_WRITE])
		show_ddir_status(rs, ts, DDIR_WRITE);

	runtime = ts->total_run_time;
	if (runtime) {
		double runt = (double) runtime;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_info(" cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, ts->ctx);

	stat_calc_dist(ts, io_u_dist);
	stat_calc_lat(ts, io_u_lat);

	log_info(" IO depths : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4], io_u_dist[5], io_u_dist[6]);

	log_info(" lat (msec): 2=%3.2f%%, 4=%3.2f%%, 10=%3.2f%%, 20=%3.2f%%, 50=%3.2f%%, 100=%3.2f%%\n", io_u_lat[0], io_u_lat[1], io_u_lat[2], io_u_lat[3], io_u_lat[4], io_u_lat[5]);
	log_info(" lat (msec): 250=%3.2f%%, 500=%3.2f%%, 750=%3.2f%%, 1000=%3.2f%%, >=2000=%3.2f%%\n", io_u_lat[6], io_u_lat[7], io_u_lat[8], io_u_lat[9], io_u_lat[10]);
}

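/*
 * Terse (semicolon separated) per-direction output: io/bw/runtime,
 * then slat, clat and bandwidth min/max/mean/stdev, emitting zeros
 * when no samples were collected.
 */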
static void show_ddir_status_terse(struct thread_stat *ts,
				   struct group_run_stats *rs, int ddir)
{
	unsigned long min, max;
	unsigned long long bw;
	double mean, dev;

	bw = 0;
	if (ts->runtime[ddir])
		bw = ts->io_bytes[ddir] / ts->runtime[ddir];

	log_info(";%llu;%llu;%lu", ts->io_bytes[ddir] >> 10, bw, ts->runtime[ddir]);

	if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
		log_info(";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		log_info(";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
		log_info(";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		log_info(";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		double p_of_agg;

		p_of_agg = mean * 100 / (double) rs->agg[ddir];
		log_info(";%lu;%lu;%f%%;%f;%f", min, max, p_of_agg, mean, dev);
	} else
		log_info(";%lu;%lu;%f%%;%f;%f", 0UL, 0UL, 0.0, 0.0, 0.0);
}

static void show_thread_status_terse(struct thread_stat *ts,
				     struct group_run_stats *rs)
{
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat[FIO_IO_U_LAT_NR];
	double usr_cpu, sys_cpu;

	log_info("%s;%d;%d", ts->name, ts->groupid, ts->error);

	show_ddir_status_terse(ts, rs, 0);
	show_ddir_status_terse(ts, rs, 1);

	if (ts->total_run_time) {
		double runt = (double) ts->total_run_time;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_info(";%f%%;%f%%;%lu", usr_cpu, sys_cpu, ts->ctx);

	stat_calc_dist(ts, io_u_dist);
	stat_calc_lat(ts, io_u_lat);

	log_info(";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%", io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4], io_u_dist[5], io_u_dist[6]);

	log_info(";%3.2f%%;%3.2f%%;%3.2f%%;%3.2f%%;%3.2f%%;%3.2f%%", io_u_lat[0], io_u_lat[1], io_u_lat[2], io_u_lat[3], io_u_lat[4], io_u_lat[5]);
	log_info(";%3.2f%%;%3.2f%%;%3.2f%%;%3.2f%%;%3.2f%%", io_u_lat[6], io_u_lat[7], io_u_lat[8], io_u_lat[9], io_u_lat[10]);

	if (ts->description)
		log_info(";%s", ts->description);

	log_info("\n");
}

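/*
 * Merge one job's io_stat into the group aggregate for group_reporting.
 * nr is the 1-based count of jobs merged so far; mean and S are simply
 * averaged across jobs rather than properly combined (see note below).
 */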
static void sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
{
	double mean, S;

	dst->min_val = min(dst->min_val, src->min_val);
	dst->max_val = max(dst->max_val, src->max_val);
	dst->samples += src->samples;

	/*
	 * Needs a new method for calculating stddev, we cannot just
	 * average them as we do below for nr > 1
	 */
	if (nr == 1) {
		mean = src->mean;
		S = src->S;
	} else {
		mean = ((src->mean * (double) (nr - 1)) + dst->mean) / ((double) nr);
		S = ((src->S * (double) (nr - 1)) + dst->S) / ((double) nr);
	}

	dst->mean = mean;
	dst->S = S;
}

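/*
 * End-of-run reporting: collapse per-thread stats into one thread_stat
 * per job (or per group with group_reporting), compute per-group
 * min/max bandwidth and runtime, then print normal or terse output
 * followed by disk utilization.
 */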
void show_run_stats(void)
{
	struct group_run_stats *runstats, *rs;
	struct thread_data *td;
	struct thread_stat *threadstats, *ts;
	int i, j, k, l, nr_ts, last_ts, idx;

	runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));

	for (i = 0; i < groupid + 1; i++) {
		rs = &runstats[i];

		memset(rs, 0, sizeof(*rs));
		rs->min_bw[0] = rs->min_run[0] = ~0UL;
		rs->min_bw[1] = rs->min_run[1] = ~0UL;
	}

	/*
	 * find out how many thread stats we need. if group reporting isn't
	 * enabled, it's one per td.
	 */
	nr_ts = 0;
	last_ts = -1;
	for_each_td(td, i) {
		if (!td->o.group_reporting) {
			nr_ts++;
			continue;
		}
		if (last_ts == td->groupid)
			continue;

		last_ts = td->groupid;
		nr_ts++;
	}

	threadstats = malloc(nr_ts * sizeof(struct thread_stat));

	for (i = 0; i < nr_ts; i++) {
		ts = &threadstats[i];

		memset(ts, 0, sizeof(*ts));
		for (j = 0; j <= DDIR_WRITE; j++) {
			ts->clat_stat[j].min_val = -1UL;
			ts->slat_stat[j].min_val = -1UL;
			ts->bw_stat[j].min_val = -1UL;
		}
		ts->groupid = -1;
	}

	j = 0;
	last_ts = -1;
	idx = 0;
	for_each_td(td, i) {
		if (idx && (!td->o.group_reporting ||
		    (td->o.group_reporting && last_ts != td->groupid))) {
			idx = 0;
			j++;
		}

		last_ts = td->groupid;

		ts = &threadstats[j];

		idx++;
		ts->members++;

		if (ts->groupid == -1) {
			/*
			 * These are per-group shared already
			 */
			ts->name = td->o.name;
			ts->description = td->o.description;
			ts->groupid = td->groupid;

			/*
			 * first pid in group, not very useful...
			 */
			ts->pid = td->pid;
		}

		if (td->error && !ts->error) {
			ts->error = td->error;
			ts->verror = td->verror;
		}

		for (l = 0; l <= DDIR_WRITE; l++) {
			sum_stat(&ts->clat_stat[l], &td->ts.clat_stat[l], idx);
			sum_stat(&ts->slat_stat[l], &td->ts.slat_stat[l], idx);
			sum_stat(&ts->bw_stat[l], &td->ts.bw_stat[l], idx);

			ts->stat_io_bytes[l] += td->ts.stat_io_bytes[l];
			ts->io_bytes[l] += td->ts.io_bytes[l];

			if (ts->runtime[l] < td->ts.runtime[l])
				ts->runtime[l] = td->ts.runtime[l];
		}

		ts->usr_time += td->ts.usr_time;
		ts->sys_time += td->ts.sys_time;
		ts->ctx += td->ts.ctx;

		for (k = 0; k < FIO_IO_U_MAP_NR; k++)
			ts->io_u_map[k] += td->ts.io_u_map[k];
		for (k = 0; k < FIO_IO_U_LAT_NR; k++)
			ts->io_u_lat[k] += td->ts.io_u_lat[k];

		for (k = 0; k <= DDIR_WRITE; k++)
			ts->total_io_u[k] += td->ts.total_io_u[k];

		ts->total_run_time += td->ts.total_run_time;
	}

	for (i = 0; i < nr_ts; i++) {
		unsigned long long bw;

		ts = &threadstats[i];
		rs = &runstats[ts->groupid];

		for (j = 0; j <= DDIR_WRITE; j++) {
			if (!ts->runtime[j])
				continue;
			if (ts->runtime[j] < rs->min_run[j] || !rs->min_run[j])
				rs->min_run[j] = ts->runtime[j];
			if (ts->runtime[j] > rs->max_run[j])
				rs->max_run[j] = ts->runtime[j];

			bw = 0;
			if (ts->runtime[j])
				bw = ts->io_bytes[j] / (unsigned long long) ts->runtime[j];
			if (bw < rs->min_bw[j])
				rs->min_bw[j] = bw;
			if (bw > rs->max_bw[j])
				rs->max_bw[j] = bw;

			rs->io_kb[j] += ts->io_bytes[j] >> 10;
		}
	}

	for (i = 0; i < groupid + 1; i++) {
		rs = &runstats[i];

		if (rs->max_run[0])
			rs->agg[0] = (rs->io_kb[0] * 1024) / rs->max_run[0];
		if (rs->max_run[1])
			rs->agg[1] = (rs->io_kb[1] * 1024) / rs->max_run[1];
	}

	/*
	 * don't overwrite last signal output
	 */
	if (!terse_output)
		printf("\n");

	for (i = 0; i < nr_ts; i++) {
		ts = &threadstats[i];
		rs = &runstats[ts->groupid];

		if (terse_output)
			show_thread_status_terse(ts, rs);
		else
			show_thread_status(ts, rs);
	}

	if (!terse_output) {
		for (i = 0; i < groupid + 1; i++)
			show_group_stats(&runstats[i], i);

		show_disk_util();
	}

	free(runstats);
	free(threadstats);
}

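/*
 * Online update of min/max, mean and the running sum of squared
 * differences (S) for one new sample, so the variance can be computed
 * later without storing every value.
 */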
static inline void add_stat_sample(struct io_stat *is, unsigned long data)
{
	double val = data;
	double delta;

	if (data > is->max_val)
		is->max_val = data;
	if (data < is->min_val)
		is->min_val = data;

	delta = val - is->mean;
	if (delta) {
		is->mean += delta / (is->samples + 1.0);
		is->S += delta * (val - is->mean);
	}

	is->samples++;
}

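/*
 * Append one sample to an io_log, doubling the backing array whenever
 * it fills up.
 */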
static void __add_log_sample(struct io_log *iolog, unsigned long val,
			     enum fio_ddir ddir, unsigned long time)
{
	if (iolog->nr_samples == iolog->max_samples) {
		int new_size = sizeof(struct io_sample) * iolog->max_samples * 2;

		iolog->log = realloc(iolog->log, new_size);
		iolog->max_samples <<= 1;
	}

	iolog->log[iolog->nr_samples].val = val;
	iolog->log[iolog->nr_samples].time = time;
	iolog->log[iolog->nr_samples].ddir = ddir;
	iolog->nr_samples++;
}

static void add_log_sample(struct thread_data *td, struct io_log *iolog,
			   unsigned long val, enum fio_ddir ddir)
{
	__add_log_sample(iolog, val, ddir, mtime_since_now(&td->epoch));
}

void add_agg_sample(unsigned long val, enum fio_ddir ddir)
{
	struct io_log *iolog = agg_io_log[ddir];

	__add_log_sample(iolog, val, ddir, mtime_since_genesis());
}

void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
		     unsigned long msec)
{
	struct thread_stat *ts = &td->ts;

	add_stat_sample(&ts->clat_stat[ddir], msec);

	if (ts->clat_log)
		add_log_sample(td, ts->clat_log, msec, ddir);
}

void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
		     unsigned long msec)
{
	struct thread_stat *ts = &td->ts;

	add_stat_sample(&ts->slat_stat[ddir], msec);

	if (ts->slat_log)
		add_log_sample(td, ts->slat_log, msec, ddir);
}

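/*
 * Sample bandwidth at most once per bw_avg_time msecs: compute the rate
 * over the bytes moved since the previous sample, then reset the window.
 */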
void add_bw_sample(struct thread_data *td, enum fio_ddir ddir,
		   struct timeval *t)
{
	struct thread_stat *ts = &td->ts;
	unsigned long spent = mtime_since(&ts->stat_sample_time[ddir], t);
	unsigned long rate;

	if (spent < td->o.bw_avg_time)
		return;

	rate = (td->this_io_bytes[ddir] - ts->stat_io_bytes[ddir]) / spent;
	add_stat_sample(&ts->bw_stat[ddir], rate);

	if (ts->bw_log)
		add_log_sample(td, ts->bw_log, rate, ddir);

	fio_gettime(&ts->stat_sample_time[ddir], NULL);
	ts->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
}