void update_rusage_stat(struct thread_data *td)
{
        struct thread_stat *ts = &td->ts;

        getrusage(RUSAGE_SELF, &ts->ru_end);

        ts->usr_time += mtime_since(&ts->ru_start.ru_utime,
                                        &ts->ru_end.ru_utime);
        ts->sys_time += mtime_since(&ts->ru_start.ru_stime,
                                        &ts->ru_end.ru_stime);
        ts->ctx += ts->ru_end.ru_nvcsw + ts->ru_end.ru_nivcsw
                        - (ts->ru_start.ru_nvcsw + ts->ru_start.ru_nivcsw);
        ts->minf += ts->ru_end.ru_minflt - ts->ru_start.ru_minflt;
        ts->majf += ts->ru_end.ru_majflt - ts->ru_start.ru_majflt;

        memcpy(&ts->ru_start, &ts->ru_end, sizeof(ts->ru_end));
}

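/*
 * The io_stat fields used below are filled by add_stat_sample() further
 * down: min_val/max_val track the extremes, while mean and S are the
 * running mean and the running sum of squared deviations from the online
 * (Welford) update. calc_lat() turns that into the sample standard
 * deviation, dev = sqrt(S / (n - 1)), and returns 0 if there are no
 * samples at all.
 */
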
static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
                    double *mean, double *dev)
{
        double n = (double) is->samples;

        if (is->samples == 0)
                return 0;

        *min = is->min_val;
        *max = is->max_val;
        *mean = is->mean;

        if (n > 1.0)
                *dev = sqrt(is->S / (n - 1.0));
        else
                *dev = 0.0;

        return 1;
}

53 static void show_group_stats(struct group_run_stats *rs, int id)
55 char *p1, *p2, *p3, *p4;
56 const char *ddir_str[] = { " READ", " WRITE" };
59 log_info("\nRun status group %d (all jobs):\n", id);
61 for (i = 0; i <= DDIR_WRITE; i++) {
65 p1 = num2str(rs->io_kb[i], 6, 1024, 1);
66 p2 = num2str(rs->agg[i], 6, 1024, 1);
67 p3 = num2str(rs->min_bw[i], 6, 1024, 1);
68 p4 = num2str(rs->max_bw[i], 6, 1024, 1);
70 log_info("%s: io=%siB, aggrb=%siB/s, minb=%siB/s, maxb=%siB/s,"
71 " mint=%llumsec, maxt=%llumsec\n", ddir_str[i], p1, p2,
72 p3, p4, rs->min_run[i],
#define ts_total_io_u(ts)       \
        ((ts)->total_io_u[0] + (ts)->total_io_u[1])

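/*
 * stat_calc_dist() converts the per-bucket counters in 'map' into
 * percentages of 'total' (or 0 when there were no samples). Buckets that
 * are non-zero but would round to less than 0.1% are reported as 0.1% so
 * they do not disappear from the output entirely.
 */
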
static void stat_calc_dist(unsigned int *map, unsigned long total,
                           double *io_u_dist)
{
        int i;

        /*
         * Do depth distribution calculations
         */
        for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
                if (!total) {
                        io_u_dist[i] = 0.0;
                        continue;
                }

                io_u_dist[i] = (double) map[i] / (double) total;
                io_u_dist[i] *= 100.0;
                if (io_u_dist[i] < 0.1 && map[i])
                        io_u_dist[i] = 0.1;
        }
}

static void stat_calc_lat(struct thread_stat *ts, double *dst,
                          unsigned int *src, int nr)
{
        unsigned long total = ts_total_io_u(ts);
        int i;

        /*
         * Do latency distribution calculations
         */
        for (i = 0; i < nr; i++) {
                if (!total) {
                        dst[i] = 0.0;
                        continue;
                }

                dst[i] = (double) src[i] / (double) total;
                dst[i] *= 100.0;
                if (dst[i] < 0.01 && src[i])
                        dst[i] = 0.01;
        }
}

static void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat)
{
        stat_calc_lat(ts, io_u_lat, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);
}

static void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat)
{
        stat_calc_lat(ts, io_u_lat, ts->io_u_lat_m, FIO_IO_U_LAT_M_NR);
}

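/*
 * Latencies are tracked in microseconds. When min, max, mean and stddev
 * are all above 1000 usec, usec_to_msec() scales them down to
 * milliseconds and returns 0 so the callers below can switch the
 * "(usec)" label to "(msec)"; otherwise it leaves the values alone and
 * returns 1.
 */
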
static int usec_to_msec(unsigned long *min, unsigned long *max, double *mean,
                        double *dev)
{
        if (*min > 1000 && *max > 1000 && *mean > 1000.0 && *dev > 1000.0) {
                *min /= 1000;
                *max /= 1000;
                *mean /= 1000.0;
                *dev /= 1000.0;
                return 0;
        }

        return 1;
}

static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
                             int ddir)
{
        const char *ddir_str[] = { "read ", "write" };
        unsigned long min, max, runt;
        unsigned long long bw, iops;
        double mean, dev;
        char *io_p, *bw_p, *iops_p;

        if (!ts->runtime[ddir])
                return;

        runt = ts->runtime[ddir];

        bw = (1000 * ts->io_bytes[ddir]) / runt;
        io_p = num2str(ts->io_bytes[ddir] >> 10, 6, 1024, 1);
        bw_p = num2str(bw >> 10, 6, 1024, 1);

        iops = (1000 * ts->total_io_u[ddir]) / runt;
        iops_p = num2str(iops, 6, 1, 0);

        log_info(" %s: io=%siB, bw=%siB/s, iops=%s, runt=%6lumsec\n",
                 ddir_str[ddir], io_p, bw_p, iops_p, ts->runtime[ddir]);

        free(io_p);
        free(bw_p);
        free(iops_p);

        if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev)) {
                const char *base = "(usec)";
                char *minp, *maxp;

                if (!usec_to_msec(&min, &max, &mean, &dev))
                        base = "(msec)";

                minp = num2str(min, 6, 1, 0);
                maxp = num2str(max, 6, 1, 0);

                log_info(" slat %s: min=%s, max=%s, avg=%5.02f,"
                         " stdev=%5.02f\n", base, minp, maxp, mean, dev);

                free(minp);
                free(maxp);
        }
        if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev)) {
                const char *base = "(usec)";
                char *minp, *maxp;

                if (!usec_to_msec(&min, &max, &mean, &dev))
                        base = "(msec)";

                minp = num2str(min, 6, 1, 0);
                maxp = num2str(max, 6, 1, 0);

                log_info(" clat %s: min=%s, max=%s, avg=%5.02f,"
                         " stdev=%5.02f\n", base, minp, maxp, mean, dev);

                free(minp);
                free(maxp);
        }
        if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
                double p_of_agg;

                p_of_agg = mean * 100 / (double) rs->agg[ddir];
                log_info(" bw (KiB/s) : min=%5lu, max=%5lu, per=%3.2f%%,"
                         " avg=%5.02f, stdev=%5.02f\n", min, max, p_of_agg,
                         mean, dev);
        }
}

static void show_lat(double *io_u_lat, int nr, const char **ranges,
                     const char *msg)
{
        int new_line = 1, i, line = 0;

        for (i = 0; i < nr; i++) {
                if (io_u_lat[i] <= 0.0)
                        continue;

                if (new_line) {
                        if (line)
                                log_info("\n");
                        log_info(" lat (%s): ", msg);
                        new_line = 0;
                        line = 0;
                }
                if (line)
                        log_info(", ");
                log_info("%s%3.2f%%", ranges[i], io_u_lat[i]);
                line++;
                if (line == 5)
                        new_line = 1;
        }
}

static void show_lat_u(double *io_u_lat_u)
{
        const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
                                 "250=", "500=", "750=", "1000=", };

        show_lat(io_u_lat_u, FIO_IO_U_LAT_U_NR, ranges, "usec");
}

static void show_lat_m(double *io_u_lat_m)
{
        const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
                                 "250=", "500=", "750=", "1000=", "2000=",
                                 ">=2000=", };

        show_lat(io_u_lat_m, FIO_IO_U_LAT_M_NR, ranges, "msec");
}

static void show_latencies(double *io_u_lat_u, double *io_u_lat_m)
{
        show_lat_u(io_u_lat_u);
        log_info("\n");
        show_lat_m(io_u_lat_m);
        log_info("\n");
}

static void show_thread_status(struct thread_stat *ts,
                               struct group_run_stats *rs)
{
        double usr_cpu, sys_cpu;
        unsigned long runtime;
        double io_u_dist[FIO_IO_U_MAP_NR];
        double io_u_lat_u[FIO_IO_U_LAT_U_NR];
        double io_u_lat_m[FIO_IO_U_LAT_M_NR];

        if (!(ts->io_bytes[0] + ts->io_bytes[1]) &&
            !(ts->total_io_u[0] + ts->total_io_u[1]))
                return;

        if (!ts->error) {
                log_info("%s: (groupid=%d, jobs=%d): err=%2d: pid=%d\n",
                         ts->name, ts->groupid, ts->members,
                         ts->error, (int) ts->pid);
        } else {
                log_info("%s: (groupid=%d, jobs=%d): err=%2d (%s): pid=%d\n",
                         ts->name, ts->groupid, ts->members,
                         ts->error, ts->verror, (int) ts->pid);
        }

        if (ts->description)
                log_info(" Description : [%s]\n", ts->description);

        if (ts->io_bytes[DDIR_READ])
                show_ddir_status(rs, ts, DDIR_READ);
        if (ts->io_bytes[DDIR_WRITE])
                show_ddir_status(rs, ts, DDIR_WRITE);

        runtime = ts->total_run_time;
        if (runtime) {
                double runt = (double) runtime;

                usr_cpu = (double) ts->usr_time * 100 / runt;
                sys_cpu = (double) ts->sys_time * 100 / runt;
        } else {
                usr_cpu = 0;
                sys_cpu = 0;
        }

        log_info(" cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu, majf=%lu,"
                 " minf=%lu\n", usr_cpu, sys_cpu, ts->ctx, ts->majf, ts->minf);

        stat_calc_dist(ts->io_u_map, ts_total_io_u(ts), io_u_dist);
        log_info(" IO depths : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%,"
                 " 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
                 io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4],
                 io_u_dist[5], io_u_dist[6]);

        stat_calc_dist(ts->io_u_submit, ts->total_submit, io_u_dist);
        log_info(" submit : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
                 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
                 io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4],
                 io_u_dist[5], io_u_dist[6]);

        stat_calc_dist(ts->io_u_complete, ts->total_complete, io_u_dist);
        log_info(" complete : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
                 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
                 io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4],
                 io_u_dist[5], io_u_dist[6]);

        log_info(" issued r/w: total=%lu/%lu, short=%lu/%lu\n",
                 ts->total_io_u[0], ts->total_io_u[1],
                 ts->short_io_u[0], ts->short_io_u[1]);

        stat_calc_lat_u(ts, io_u_lat_u);
        stat_calc_lat_m(ts, io_u_lat_m);
        show_latencies(io_u_lat_u, io_u_lat_m);

        if (ts->continue_on_error) {
                log_info(" errors : total=%lu, first_error=%d/<%s>\n",
                         ts->total_err_count, ts->first_error,
                         strerror(ts->first_error));
        }
}

static void show_ddir_status_terse(struct thread_stat *ts,
                                   struct group_run_stats *rs, int ddir)
{
        unsigned long min, max;
        unsigned long long bw;
        double mean, dev;

        bw = 0;
        if (ts->runtime[ddir])
                bw = ts->io_bytes[ddir] / ts->runtime[ddir];

        log_info(";%llu;%llu;%lu", ts->io_bytes[ddir] >> 10, bw,
                 ts->runtime[ddir]);

        if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
                log_info(";%lu;%lu;%f;%f", min, max, mean, dev);
        else
                log_info(";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

        if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
                log_info(";%lu;%lu;%f;%f", min, max, mean, dev);
        else
                log_info(";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

        if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
                double p_of_agg;

                p_of_agg = mean * 100 / (double) rs->agg[ddir];
                log_info(";%lu;%lu;%f%%;%f;%f", min, max, p_of_agg, mean, dev);
        } else
                log_info(";%lu;%lu;%f%%;%f;%f", 0UL, 0UL, 0.0, 0.0, 0.0);
}

static void show_thread_status_terse(struct thread_stat *ts,
                                     struct group_run_stats *rs)
{
        double io_u_dist[FIO_IO_U_MAP_NR];
        double io_u_lat_u[FIO_IO_U_LAT_U_NR];
        double io_u_lat_m[FIO_IO_U_LAT_M_NR];
        double usr_cpu, sys_cpu;
        int i;

        log_info("%s;%d;%d", ts->name, ts->groupid, ts->error);

        show_ddir_status_terse(ts, rs, 0);
        show_ddir_status_terse(ts, rs, 1);

        if (ts->total_run_time) {
                double runt = (double) ts->total_run_time;

                usr_cpu = (double) ts->usr_time * 100 / runt;
                sys_cpu = (double) ts->sys_time * 100 / runt;
        } else {
                usr_cpu = 0;
                sys_cpu = 0;
        }

        log_info(";%f%%;%f%%;%lu;%lu;%lu", usr_cpu, sys_cpu, ts->ctx, ts->majf,
                 ts->minf);

        stat_calc_dist(ts->io_u_map, ts_total_io_u(ts), io_u_dist);
        stat_calc_lat_u(ts, io_u_lat_u);
        stat_calc_lat_m(ts, io_u_lat_m);

        log_info(";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%",
                 io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
                 io_u_dist[4], io_u_dist[5], io_u_dist[6]);

        for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
                log_info(";%3.2f%%", io_u_lat_u[i]);
        for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
                log_info(";%3.2f%%", io_u_lat_m[i]);

        if (ts->continue_on_error)
                log_info(";%lu;%d", ts->total_err_count, ts->first_error);

        if (ts->description)
                log_info(";%s", ts->description);

        log_info("\n");
}

static void sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
{
        double mean, S;

        dst->min_val = min(dst->min_val, src->min_val);
        dst->max_val = max(dst->max_val, src->max_val);
        dst->samples += src->samples;

        /*
         * Needs a new method for calculating stddev, we cannot just
         * average them as we do below for nr > 1
         */
        if (nr == 1) {
                mean = src->mean;
                S = src->S;
        } else {
                mean = ((dst->mean * (double) (nr - 1))
                                + src->mean) / ((double) nr);
                S = ((dst->S * (double) (nr - 1)) + src->S) / ((double) nr);
        }

        dst->mean = mean;
        dst->S = S;
}

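/*
 * A sketch of the "new method" the comment above asks for: two (n, mean, S)
 * accumulators can be merged exactly with the standard parallel-variance
 * update (Chan et al.) instead of being averaged:
 *
 *      delta = mean_b - mean_a
 *      n     = n_a + n_b
 *      mean  = (n_a * mean_a + n_b * mean_b) / n
 *      S     = S_a + S_b + delta * delta * n_a * n_b / n
 *
 * where (n_a, mean_a, S_a) would be the already-merged dst values before
 * dst->samples is updated and (n_b, mean_b, S_b) the incoming src values.
 * This is only an illustration; sum_stat() above still uses the simple
 * weighted average.
 */
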
void show_run_stats(void)
{
        struct group_run_stats *runstats, *rs;
        struct thread_data *td;
        struct thread_stat *threadstats, *ts;
        int i, j, k, l, nr_ts, last_ts, idx;

        runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));

        for (i = 0; i < groupid + 1; i++) {
                rs = &runstats[i];

                memset(rs, 0, sizeof(*rs));
                rs->min_bw[0] = rs->min_run[0] = ~0UL;
                rs->min_bw[1] = rs->min_run[1] = ~0UL;
        }

        /*
         * find out how many thread stats we need. if group reporting isn't
         * enabled, it's one per td.
         */
        nr_ts = 0;
        last_ts = -1;
        for_each_td(td, i) {
                if (!td->o.group_reporting) {
                        nr_ts++;
                        continue;
                }
                if (last_ts == td->groupid)
                        continue;

                last_ts = td->groupid;
                nr_ts++;
        }

        threadstats = malloc(nr_ts * sizeof(struct thread_stat));

        for (i = 0; i < nr_ts; i++) {
                ts = &threadstats[i];

                memset(ts, 0, sizeof(*ts));
                for (j = 0; j <= DDIR_WRITE; j++) {
                        ts->clat_stat[j].min_val = -1UL;
                        ts->slat_stat[j].min_val = -1UL;
                        ts->bw_stat[j].min_val = -1UL;
                }
                ts->groupid = -1;
        }

        j = 0;
        last_ts = -1;
        idx = 0;
        for_each_td(td, i) {
                if (idx && (!td->o.group_reporting ||
                    (td->o.group_reporting && last_ts != td->groupid))) {
                        idx = 0;
                        j++;
                }

                last_ts = td->groupid;

                ts = &threadstats[j];

                idx++;
                ts->members++;

                if (ts->groupid == -1) {
                        /*
                         * These are per-group shared already
                         */
                        ts->name = td->o.name;
                        ts->description = td->o.description;
                        ts->groupid = td->groupid;

                        /*
                         * first pid in group, not very useful...
                         */
                        ts->pid = td->pid;
                }

                ts->continue_on_error = td->o.continue_on_error;
                ts->total_err_count += td->total_err_count;
                ts->first_error = td->first_error;

                if (!td->error && td->o.continue_on_error && td->first_error) {
                        ts->error = td->first_error;
                        ts->verror = td->verror;
                } else if (td->error) {
                        ts->error = td->error;
                        ts->verror = td->verror;
                }

                for (l = 0; l <= DDIR_WRITE; l++) {
                        sum_stat(&ts->clat_stat[l], &td->ts.clat_stat[l], idx);
                        sum_stat(&ts->slat_stat[l], &td->ts.slat_stat[l], idx);
                        sum_stat(&ts->bw_stat[l], &td->ts.bw_stat[l], idx);

                        ts->stat_io_bytes[l] += td->ts.stat_io_bytes[l];
                        ts->io_bytes[l] += td->ts.io_bytes[l];

                        if (ts->runtime[l] < td->ts.runtime[l])
                                ts->runtime[l] = td->ts.runtime[l];
                }

                ts->usr_time += td->ts.usr_time;
                ts->sys_time += td->ts.sys_time;
                ts->ctx += td->ts.ctx;
                ts->majf += td->ts.majf;
                ts->minf += td->ts.minf;

                for (k = 0; k < FIO_IO_U_MAP_NR; k++)
                        ts->io_u_map[k] += td->ts.io_u_map[k];
                for (k = 0; k < FIO_IO_U_MAP_NR; k++)
                        ts->io_u_submit[k] += td->ts.io_u_submit[k];
                for (k = 0; k < FIO_IO_U_MAP_NR; k++)
                        ts->io_u_complete[k] += td->ts.io_u_complete[k];
                for (k = 0; k < FIO_IO_U_LAT_U_NR; k++)
                        ts->io_u_lat_u[k] += td->ts.io_u_lat_u[k];
                for (k = 0; k < FIO_IO_U_LAT_M_NR; k++)
                        ts->io_u_lat_m[k] += td->ts.io_u_lat_m[k];

                for (k = 0; k <= DDIR_WRITE; k++) {
                        ts->total_io_u[k] += td->ts.total_io_u[k];
                        ts->short_io_u[k] += td->ts.short_io_u[k];
                }

                ts->total_run_time += td->ts.total_run_time;
                ts->total_submit += td->ts.total_submit;
                ts->total_complete += td->ts.total_complete;
        }

        for (i = 0; i < nr_ts; i++) {
                unsigned long long bw;

                ts = &threadstats[i];
                rs = &runstats[ts->groupid];

                for (j = 0; j <= DDIR_WRITE; j++) {
                        if (ts->runtime[j] < rs->min_run[j] || !rs->min_run[j])
                                rs->min_run[j] = ts->runtime[j];
                        if (ts->runtime[j] > rs->max_run[j])
                                rs->max_run[j] = ts->runtime[j];

                        bw = 0;
                        if (ts->runtime[j]) {
                                unsigned long runt;

                                runt = ts->runtime[j] * 1024 / 1000;
                                bw = ts->io_bytes[j] / runt;
                        }
                        if (bw < rs->min_bw[j])
                                rs->min_bw[j] = bw;
                        if (bw > rs->max_bw[j])
                                rs->max_bw[j] = bw;

                        rs->io_kb[j] += ts->io_bytes[j] >> 10;
                }
        }

        for (i = 0; i < groupid + 1; i++) {
                unsigned long max_run[2];

                rs = &runstats[i];
                max_run[0] = rs->max_run[0] * 1024 / 1000;
                max_run[1] = rs->max_run[1] * 1024 / 1000;

                if (rs->max_run[0])
                        rs->agg[0] = (rs->io_kb[0] * 1024) / max_run[0];
                if (rs->max_run[1])
                        rs->agg[1] = (rs->io_kb[1] * 1024) / max_run[1];
        }

        /*
         * don't overwrite last signal output
         */
        if (!terse_output)
                log_info("\n");

        for (i = 0; i < nr_ts; i++) {
                ts = &threadstats[i];
                rs = &runstats[ts->groupid];

                if (terse_output)
                        show_thread_status_terse(ts, rs);
                else
                        show_thread_status(ts, rs);
        }

        if (!terse_output) {
                for (i = 0; i < groupid + 1; i++)
                        show_group_stats(&runstats[i], i);
        }

        free(runstats);
        free(threadstats);
}

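/*
 * add_stat_sample() below implements Welford's online update: for the
 * (n+1)th sample x, with n = is->samples before the update,
 *
 *      delta = x - mean
 *      mean += delta / (n + 1)
 *      S    += delta * (x - mean)
 *
 * so mean stays the running average and S the running sum of squared
 * deviations, which calc_lat() later turns into sqrt(S / (n - 1)).
 */
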
static inline void add_stat_sample(struct io_stat *is, unsigned long data)
{
        double val = data;
        double delta;

        if (data > is->max_val)
                is->max_val = data;
        if (data < is->min_val)
                is->min_val = data;

        delta = val - is->mean;
        is->mean += delta / (is->samples + 1.0);
        is->S += delta * (val - is->mean);

        is->samples++;
}

static void __add_log_sample(struct io_log *iolog, unsigned long val,
                             enum fio_ddir ddir, unsigned int bs,
                             unsigned long time)
{
        const int nr_samples = iolog->nr_samples;

        /*
         * Grow the log by doubling, so appends stay amortized O(1)
         */
        if (iolog->nr_samples == iolog->max_samples) {
                int new_size = sizeof(struct io_sample) * iolog->max_samples * 2;

                iolog->log = realloc(iolog->log, new_size);
                iolog->max_samples <<= 1;
        }

        iolog->log[nr_samples].val = val;
        iolog->log[nr_samples].time = time;
        iolog->log[nr_samples].ddir = ddir;
        iolog->log[nr_samples].bs = bs;
        iolog->nr_samples++;
}

/*
 * Per-job samples are stamped relative to the job's epoch, aggregate
 * samples relative to the global genesis time.
 */
static void add_log_sample(struct thread_data *td, struct io_log *iolog,
                           unsigned long val, enum fio_ddir ddir,
                           unsigned int bs)
{
        __add_log_sample(iolog, val, ddir, bs, mtime_since_now(&td->epoch));
}

void add_agg_sample(unsigned long val, enum fio_ddir ddir, unsigned int bs)
{
        struct io_log *iolog = agg_io_log[ddir];

        __add_log_sample(iolog, val, ddir, bs, mtime_since_genesis());
}

void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
                     unsigned long usec, unsigned int bs)
{
        struct thread_stat *ts = &td->ts;

        add_stat_sample(&ts->clat_stat[ddir], usec);

        if (ts->clat_log)
                add_log_sample(td, ts->clat_log, usec, ddir, bs);
}

void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
                     unsigned long usec, unsigned int bs)
{
        struct thread_stat *ts = &td->ts;

        add_stat_sample(&ts->slat_stat[ddir], usec);

        if (ts->slat_log)
                add_log_sample(td, ts->slat_log, usec, ddir, bs);
}

void add_bw_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs,
                   struct timeval *t)
{
        struct thread_stat *ts = &td->ts;
        unsigned long spent = mtime_since(&ts->stat_sample_time[ddir], t);
        unsigned long rate;

        /*
         * Only sample bandwidth once per bw_avg_time interval
         */
        if (spent < td->o.bw_avg_time)
                return;

        rate = (td->this_io_bytes[ddir] - ts->stat_io_bytes[ddir]) * 1000 / spent / 1024;
        add_stat_sample(&ts->bw_stat[ddir], rate);

        if (ts->bw_log)
                add_log_sample(td, ts->bw_log, rate, ddir, bs);

        fio_gettime(&ts->stat_sample_time[ddir], NULL);
        ts->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
}