X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=stat.c;h=9d93dcd17040d6326a1c8ea9c0226d7dfd363bb6;hb=f07528f0629efa0c55bbdb1bdcc9dfdacc32fb47;hp=33637900df622c2fae42841f19fc0ed5cd9ebdc1;hpb=36833fb04b5f9a734e96a571dfb52fc54b5b95e7;p=fio.git diff --git a/stat.c b/stat.c index 33637900..9d93dcd1 100644 --- a/stat.c +++ b/stat.c @@ -15,6 +15,7 @@ #include "helper_thread.h" #include "smalloc.h" #include "zbd.h" +#include "oslib/asprintf.h" #define LOG_MSEC_SLACK 1 @@ -158,7 +159,7 @@ unsigned int calc_clat_percentiles(uint64_t *io_u_plat, unsigned long long nr, * isn't a worry. Also note that this does not work for NaN values. */ if (len > 1) - qsort((void *)plist, len, sizeof(plist[0]), double_cmp); + qsort(plist, len, sizeof(plist[0]), double_cmp); ovals = malloc(len * sizeof(*ovals)); if (!ovals) @@ -258,8 +259,7 @@ static void show_clat_percentiles(uint64_t *io_u_plat, unsigned long long nr, } out: - if (ovals) - free(ovals); + free(ovals); } bool calc_lat(struct io_stat *is, unsigned long long *min, @@ -482,9 +482,12 @@ static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts, display_lat("clat", min, max, mean, dev, out); if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev)) display_lat(" lat", min, max, mean, dev, out); + if (calc_lat(&ts->clat_high_prio_stat[ddir], &min, &max, &mean, &dev)) + display_lat("prio_clat", min, max, mean, dev, out); if (ts->clat_percentiles || ts->lat_percentiles) { const char *name = ts->clat_percentiles ? "clat" : " lat"; + char prio_name[32]; uint64_t samples; if (ts->clat_percentiles) @@ -496,6 +499,27 @@ static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts, samples, ts->percentile_list, ts->percentile_precision, name, out); + + /* Only print this if some high and low priority stats were collected */ + if (ts->clat_high_prio_stat[ddir].samples > 0 && + ts->clat_prio_stat[ddir].samples > 0) + { + sprintf(prio_name, "high prio (%.2f%%) %s", + 100. * (double) ts->clat_high_prio_stat[ddir].samples / (double) samples, + name); + show_clat_percentiles(ts->io_u_plat_high_prio[ddir], + ts->clat_high_prio_stat[ddir].samples, + ts->percentile_list, + ts->percentile_precision, prio_name, out); + + sprintf(prio_name, "low prio (%.2f%%) %s", + 100. * (double) ts->clat_prio_stat[ddir].samples / (double) samples, + name); + show_clat_percentiles(ts->io_u_plat_prio[ddir], + ts->clat_prio_stat[ddir].samples, + ts->percentile_list, + ts->percentile_precision, prio_name, out); + } } if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) { double p_of_agg = 100.0, fkb_base = (double)rs->kb_base; @@ -683,7 +707,7 @@ static int calc_block_percentiles(int nr_block_infos, uint32_t *block_infos, * isn't a worry. Also note that this does not work for NaN values. 
*/ if (len > 1) - qsort((void *)plist, len, sizeof(plist[0]), double_cmp); + qsort(plist, len, sizeof(plist[0]), double_cmp); /* Start only after the uninit entries end */ for (nr_uninit = 0; @@ -785,6 +809,218 @@ static void show_ss_normal(struct thread_stat *ts, struct buf_output *out) free(p2); } +static void show_agg_stats(struct disk_util_agg *agg, int terse, + struct buf_output *out) +{ + if (!agg->slavecount) + return; + + if (!terse) { + log_buf(out, ", aggrios=%llu/%llu, aggrmerge=%llu/%llu, " + "aggrticks=%llu/%llu, aggrin_queue=%llu, " + "aggrutil=%3.2f%%", + (unsigned long long) agg->ios[0] / agg->slavecount, + (unsigned long long) agg->ios[1] / agg->slavecount, + (unsigned long long) agg->merges[0] / agg->slavecount, + (unsigned long long) agg->merges[1] / agg->slavecount, + (unsigned long long) agg->ticks[0] / agg->slavecount, + (unsigned long long) agg->ticks[1] / agg->slavecount, + (unsigned long long) agg->time_in_queue / agg->slavecount, + agg->max_util.u.f); + } else { + log_buf(out, ";slaves;%llu;%llu;%llu;%llu;%llu;%llu;%llu;%3.2f%%", + (unsigned long long) agg->ios[0] / agg->slavecount, + (unsigned long long) agg->ios[1] / agg->slavecount, + (unsigned long long) agg->merges[0] / agg->slavecount, + (unsigned long long) agg->merges[1] / agg->slavecount, + (unsigned long long) agg->ticks[0] / agg->slavecount, + (unsigned long long) agg->ticks[1] / agg->slavecount, + (unsigned long long) agg->time_in_queue / agg->slavecount, + agg->max_util.u.f); + } +} + +static void aggregate_slaves_stats(struct disk_util *masterdu) +{ + struct disk_util_agg *agg = &masterdu->agg; + struct disk_util_stat *dus; + struct flist_head *entry; + struct disk_util *slavedu; + double util; + + flist_for_each(entry, &masterdu->slaves) { + slavedu = flist_entry(entry, struct disk_util, slavelist); + dus = &slavedu->dus; + agg->ios[0] += dus->s.ios[0]; + agg->ios[1] += dus->s.ios[1]; + agg->merges[0] += dus->s.merges[0]; + agg->merges[1] += dus->s.merges[1]; + agg->sectors[0] += dus->s.sectors[0]; + agg->sectors[1] += dus->s.sectors[1]; + agg->ticks[0] += dus->s.ticks[0]; + agg->ticks[1] += dus->s.ticks[1]; + agg->time_in_queue += dus->s.time_in_queue; + agg->slavecount++; + + util = (double) (100 * dus->s.io_ticks / (double) slavedu->dus.s.msec); + /* System utilization is the utilization of the + * component with the highest utilization. 
+ */ + if (util > agg->max_util.u.f) + agg->max_util.u.f = util; + + } + + if (agg->max_util.u.f > 100.0) + agg->max_util.u.f = 100.0; +} + +void print_disk_util(struct disk_util_stat *dus, struct disk_util_agg *agg, + int terse, struct buf_output *out) +{ + double util = 0; + + if (dus->s.msec) + util = (double) 100 * dus->s.io_ticks / (double) dus->s.msec; + if (util > 100.0) + util = 100.0; + + if (!terse) { + if (agg->slavecount) + log_buf(out, " "); + + log_buf(out, " %s: ios=%llu/%llu, merge=%llu/%llu, " + "ticks=%llu/%llu, in_queue=%llu, util=%3.2f%%", + dus->name, + (unsigned long long) dus->s.ios[0], + (unsigned long long) dus->s.ios[1], + (unsigned long long) dus->s.merges[0], + (unsigned long long) dus->s.merges[1], + (unsigned long long) dus->s.ticks[0], + (unsigned long long) dus->s.ticks[1], + (unsigned long long) dus->s.time_in_queue, + util); + } else { + log_buf(out, ";%s;%llu;%llu;%llu;%llu;%llu;%llu;%llu;%3.2f%%", + dus->name, + (unsigned long long) dus->s.ios[0], + (unsigned long long) dus->s.ios[1], + (unsigned long long) dus->s.merges[0], + (unsigned long long) dus->s.merges[1], + (unsigned long long) dus->s.ticks[0], + (unsigned long long) dus->s.ticks[1], + (unsigned long long) dus->s.time_in_queue, + util); + } + + /* + * If the device has slaves, aggregate the stats for + * those slave devices also. + */ + show_agg_stats(agg, terse, out); + + if (!terse) + log_buf(out, "\n"); +} + +void json_array_add_disk_util(struct disk_util_stat *dus, + struct disk_util_agg *agg, struct json_array *array) +{ + struct json_object *obj; + double util = 0; + + if (dus->s.msec) + util = (double) 100 * dus->s.io_ticks / (double) dus->s.msec; + if (util > 100.0) + util = 100.0; + + obj = json_create_object(); + json_array_add_value_object(array, obj); + + json_object_add_value_string(obj, "name", dus->name); + json_object_add_value_int(obj, "read_ios", dus->s.ios[0]); + json_object_add_value_int(obj, "write_ios", dus->s.ios[1]); + json_object_add_value_int(obj, "read_merges", dus->s.merges[0]); + json_object_add_value_int(obj, "write_merges", dus->s.merges[1]); + json_object_add_value_int(obj, "read_ticks", dus->s.ticks[0]); + json_object_add_value_int(obj, "write_ticks", dus->s.ticks[1]); + json_object_add_value_int(obj, "in_queue", dus->s.time_in_queue); + json_object_add_value_float(obj, "util", util); + + /* + * If the device has slaves, aggregate the stats for + * those slave devices also. 
+ */ + if (!agg->slavecount) + return; + json_object_add_value_int(obj, "aggr_read_ios", + agg->ios[0] / agg->slavecount); + json_object_add_value_int(obj, "aggr_write_ios", + agg->ios[1] / agg->slavecount); + json_object_add_value_int(obj, "aggr_read_merges", + agg->merges[0] / agg->slavecount); + json_object_add_value_int(obj, "aggr_write_merge", + agg->merges[1] / agg->slavecount); + json_object_add_value_int(obj, "aggr_read_ticks", + agg->ticks[0] / agg->slavecount); + json_object_add_value_int(obj, "aggr_write_ticks", + agg->ticks[1] / agg->slavecount); + json_object_add_value_int(obj, "aggr_in_queue", + agg->time_in_queue / agg->slavecount); + json_object_add_value_float(obj, "aggr_util", agg->max_util.u.f); +} + +static void json_object_add_disk_utils(struct json_object *obj, + struct flist_head *head) +{ + struct json_array *array = json_create_array(); + struct flist_head *entry; + struct disk_util *du; + + json_object_add_value_array(obj, "disk_util", array); + + flist_for_each(entry, head) { + du = flist_entry(entry, struct disk_util, list); + + aggregate_slaves_stats(du); + json_array_add_disk_util(&du->dus, &du->agg, array); + } +} + +void show_disk_util(int terse, struct json_object *parent, + struct buf_output *out) +{ + struct flist_head *entry; + struct disk_util *du; + bool do_json; + + if (!is_running_backend()) + return; + + if (flist_empty(&disk_list)) { + return; + } + + if ((output_format & FIO_OUTPUT_JSON) && parent) + do_json = true; + else + do_json = false; + + if (!terse && !do_json) + log_buf(out, "\nDisk stats (read/write):\n"); + + if (do_json) + json_object_add_disk_utils(parent, &disk_list); + else if (output_format & ~(FIO_OUTPUT_JSON | FIO_OUTPUT_JSON_PLUS)) { + flist_for_each(entry, &disk_list) { + du = flist_entry(entry, struct disk_util, list); + + aggregate_slaves_stats(du); + print_disk_util(&du->dus, &du->agg, terse, out); + } + } +} + static void show_thread_status_normal(struct thread_stat *ts, struct group_run_stats *rs, struct buf_output *out) @@ -797,7 +1033,7 @@ static void show_thread_status_normal(struct thread_stat *ts, if (!ddir_rw_sum(ts->io_bytes) && !ddir_rw_sum(ts->total_io_u)) return; - + memset(time_buf, 0, sizeof(time_buf)); time(&time_p); @@ -955,8 +1191,7 @@ static void show_ddir_status_terse(struct thread_stat *ts, else log_buf(out, ";%llu;%llu;%f;%f", 0ULL, 0ULL, 0.0, 0.0); - if (ovals) - free(ovals); + free(ovals); bw_stat = calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev); if (bw_stat) { @@ -995,7 +1230,8 @@ static void add_ddir_status_json(struct thread_stat *ts, double mean, dev, iops; unsigned int len; int i; - struct json_object *dir_object, *tmp_object, *percentile_object, *clat_bins_object = NULL; + struct json_object *dir_object, *tmp_object, *percentile_object = NULL, + *clat_bins_object = NULL; char buf[120]; double p_of_agg = 100.0; @@ -1090,34 +1326,145 @@ static void add_ddir_status_json(struct thread_stat *ts, } else len = 0; - percentile_object = json_create_object(); - if (ts->clat_percentiles) + if (ts->clat_percentiles) { + percentile_object = json_create_object(); json_object_add_value_object(tmp_object, "percentile", percentile_object); - for (i = 0; i < len; i++) { - snprintf(buf, sizeof(buf), "%f", ts->percentile_list[i].u.f); - json_object_add_value_int(percentile_object, (const char *)buf, ovals[i]); + for (i = 0; i < len; i++) { + snprintf(buf, sizeof(buf), "%f", + ts->percentile_list[i].u.f); + json_object_add_value_int(percentile_object, buf, + ovals[i]); + } } - if (output_format & 
FIO_OUTPUT_JSON_PLUS) { + free(ovals); + + if (output_format & FIO_OUTPUT_JSON_PLUS && ts->clat_percentiles) { clat_bins_object = json_create_object(); - if (ts->clat_percentiles) - json_object_add_value_object(tmp_object, "bins", clat_bins_object); + json_object_add_value_object(tmp_object, "bins", + clat_bins_object); for(i = 0; i < FIO_IO_U_PLAT_NR; i++) { if (ddir_rw(ddir)) { if (ts->io_u_plat[ddir][i]) { snprintf(buf, sizeof(buf), "%llu", plat_idx_to_val(i)); - json_object_add_value_int(clat_bins_object, (const char *)buf, ts->io_u_plat[ddir][i]); + json_object_add_value_int(clat_bins_object, buf, ts->io_u_plat[ddir][i]); } } else { if (ts->io_u_sync_plat[i]) { snprintf(buf, sizeof(buf), "%llu", plat_idx_to_val(i)); - json_object_add_value_int(clat_bins_object, (const char *)buf, ts->io_u_sync_plat[i]); + json_object_add_value_int(clat_bins_object, buf, ts->io_u_sync_plat[i]); } } } } + + /* Only print PRIO latencies if some high priority samples were gathered */ + if (ts->clat_high_prio_stat[ddir].samples > 0) { + /* START OF HIGH PRIO CLAT */ + if (!calc_lat(&ts->clat_high_prio_stat[ddir], &min, &max, &mean, &dev)) { + min = max = 0; + mean = dev = 0.0; + } + tmp_object = json_create_object(); + json_object_add_value_object(dir_object, "clat_prio", + tmp_object); + json_object_add_value_int(tmp_object, "samples", + ts->clat_high_prio_stat[ddir].samples); + json_object_add_value_int(tmp_object, "min", min); + json_object_add_value_int(tmp_object, "max", max); + json_object_add_value_float(tmp_object, "mean", mean); + json_object_add_value_float(tmp_object, "stddev", dev); + + if (ts->clat_percentiles) { + len = calc_clat_percentiles(ts->io_u_plat_high_prio[ddir], + ts->clat_high_prio_stat[ddir].samples, + ts->percentile_list, &ovals, &maxv, + &minv); + } else + len = 0; + + percentile_object = json_create_object(); + json_object_add_value_object(tmp_object, "percentile", percentile_object); + for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) { + if (i >= len) { + json_object_add_value_int(percentile_object, "0.00", 0); + continue; + } + snprintf(buf, sizeof(buf), "%f", ts->percentile_list[i].u.f); + json_object_add_value_int(percentile_object, (const char *)buf, ovals[i]); + } + + if (output_format & FIO_OUTPUT_JSON_PLUS) { + clat_bins_object = json_create_object(); + json_object_add_value_object(tmp_object, "bins", clat_bins_object); + for(i = 0; i < FIO_IO_U_PLAT_NR; i++) { + snprintf(buf, sizeof(buf), "%d", i); + json_object_add_value_int(clat_bins_object, (const char *)buf, + ts->io_u_plat_high_prio[ddir][i]); + } + json_object_add_value_int(clat_bins_object, "FIO_IO_U_PLAT_BITS", + FIO_IO_U_PLAT_BITS); + json_object_add_value_int(clat_bins_object, "FIO_IO_U_PLAT_VAL", + FIO_IO_U_PLAT_VAL); + json_object_add_value_int(clat_bins_object, "FIO_IO_U_PLAT_NR", + FIO_IO_U_PLAT_NR); + } + /* END OF HIGH PRIO CLAT */ + + /* START OF PRIO CLAT */ + if (!calc_lat(&ts->clat_prio_stat[ddir], &min, &max, &mean, &dev)) { + min = max = 0; + mean = dev = 0.0; + } + tmp_object = json_create_object(); + json_object_add_value_object(dir_object, "clat_low_prio", + tmp_object); + json_object_add_value_int(tmp_object, "samples", + ts->clat_prio_stat[ddir].samples); + json_object_add_value_int(tmp_object, "min", min); + json_object_add_value_int(tmp_object, "max", max); + json_object_add_value_float(tmp_object, "mean", mean); + json_object_add_value_float(tmp_object, "stddev", dev); + + if (ts->clat_percentiles) { + len = calc_clat_percentiles(ts->io_u_plat_prio[ddir], + ts->clat_prio_stat[ddir].samples, + 
ts->percentile_list, &ovals, &maxv, + &minv); + } else + len = 0; + + percentile_object = json_create_object(); + json_object_add_value_object(tmp_object, "percentile", percentile_object); + for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) { + if (i >= len) { + json_object_add_value_int(percentile_object, "0.00", 0); + continue; + } + snprintf(buf, sizeof(buf), "%f", ts->percentile_list[i].u.f); + json_object_add_value_int(percentile_object, (const char *)buf, ovals[i]); + } + + if (output_format & FIO_OUTPUT_JSON_PLUS) { + clat_bins_object = json_create_object(); + json_object_add_value_object(tmp_object, "bins", clat_bins_object); + for(i = 0; i < FIO_IO_U_PLAT_NR; i++) { + snprintf(buf, sizeof(buf), "%d", i); + json_object_add_value_int(clat_bins_object, (const char *)buf, + ts->io_u_plat_prio[ddir][i]); + } + json_object_add_value_int(clat_bins_object, "FIO_IO_U_PLAT_BITS", + FIO_IO_U_PLAT_BITS); + json_object_add_value_int(clat_bins_object, "FIO_IO_U_PLAT_VAL", + FIO_IO_U_PLAT_VAL); + json_object_add_value_int(clat_bins_object, "FIO_IO_U_PLAT_NR", + FIO_IO_U_PLAT_NR); + } + /* END OF PRIO CLAT */ + } + if (!ddir_rw(ddir)) return; @@ -1136,9 +1483,6 @@ static void add_ddir_status_json(struct thread_stat *ts, if (output_format & FIO_OUTPUT_JSON_PLUS && ts->lat_percentiles) json_object_add_value_object(tmp_object, "bins", clat_bins_object); - if (ovals) - free(ovals); - if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) { if (rs->agg[ddir]) { p_of_agg = mean * 100 / (double) (rs->agg[ddir] / 1024); @@ -1448,7 +1792,7 @@ static struct json_object *show_thread_status_json(struct thread_stat *ts, snprintf(buf, sizeof(buf), "%f", ts->percentile_list[i].u.f); json_object_add_value_int(percentile_object, - (const char *)buf, + buf, percentiles[i]); } @@ -1642,6 +1986,8 @@ void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src, for (l = 0; l < DDIR_RWDIR_CNT; l++) { if (!dst->unified_rw_rep) { sum_stat(&dst->clat_stat[l], &src->clat_stat[l], first, false); + sum_stat(&dst->clat_high_prio_stat[l], &src->clat_high_prio_stat[l], first, false); + sum_stat(&dst->clat_prio_stat[l], &src->clat_prio_stat[l], first, false); sum_stat(&dst->slat_stat[l], &src->slat_stat[l], first, false); sum_stat(&dst->lat_stat[l], &src->lat_stat[l], first, false); sum_stat(&dst->bw_stat[l], &src->bw_stat[l], first, true); @@ -1653,6 +1999,8 @@ void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src, dst->runtime[l] = src->runtime[l]; } else { sum_stat(&dst->clat_stat[0], &src->clat_stat[l], first, false); + sum_stat(&dst->clat_high_prio_stat[l], &src->clat_high_prio_stat[l], first, false); + sum_stat(&dst->clat_prio_stat[l], &src->clat_prio_stat[l], first, false); sum_stat(&dst->slat_stat[0], &src->slat_stat[l], first, false); sum_stat(&dst->lat_stat[0], &src->lat_stat[l], first, false); sum_stat(&dst->bw_stat[0], &src->bw_stat[l], first, true); @@ -1712,10 +2060,16 @@ void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src, int m; for (m = 0; m < FIO_IO_U_PLAT_NR; m++) { - if (!dst->unified_rw_rep) + if (!dst->unified_rw_rep) { dst->io_u_plat[k][m] += src->io_u_plat[k][m]; - else + dst->io_u_plat_high_prio[k][m] += src->io_u_plat_high_prio[k][m]; + dst->io_u_plat_prio[k][m] += src->io_u_plat_prio[k][m]; + } else { dst->io_u_plat[0][m] += src->io_u_plat[k][m]; + dst->io_u_plat_high_prio[0][m] += src->io_u_plat_high_prio[k][m]; + dst->io_u_plat_prio[0][m] += src->io_u_plat_prio[k][m]; + } + } } @@ -1748,6 +2102,8 @@ void init_thread_stat(struct thread_stat *ts) 
ts->slat_stat[j].min_val = -1UL; ts->bw_stat[j].min_val = -1UL; ts->iops_stat[j].min_val = -1UL; + ts->clat_high_prio_stat[j].min_val = -1UL; + ts->clat_prio_stat[j].min_val = -1UL; } ts->sync_stat.min_val = -1UL; ts->groupid = -1; @@ -2123,6 +2479,9 @@ static int check_status_file(void) } if (temp_dir == NULL) temp_dir = "/tmp"; +#ifdef __COVERITY__ + __coverity_tainted_data_sanitize__(temp_dir); +#endif snprintf(fio_status_file_path, sizeof(fio_status_file_path), "%s/%s", temp_dir, FIO_STATUS_FILE); @@ -2325,7 +2684,7 @@ static struct io_logs *get_cur_log(struct io_log *iolog) static void __add_log_sample(struct io_log *iolog, union io_sample_data data, enum fio_ddir ddir, unsigned long long bs, - unsigned long t, uint64_t offset) + unsigned long t, uint64_t offset, uint8_t priority_bit) { struct io_logs *cur_log; @@ -2344,6 +2703,7 @@ static void __add_log_sample(struct io_log *iolog, union io_sample_data data, s->time = t + (iolog->td ? iolog->td->unix_epoch : 0); io_sample_set_ddir(iolog, s, ddir); s->bs = bs; + s->priority_bit = priority_bit; if (iolog->log_offset) { struct io_sample_offset *so = (void *) s; @@ -2371,6 +2731,8 @@ void reset_io_stats(struct thread_data *td) int i, j; for (i = 0; i < DDIR_RWDIR_CNT; i++) { + reset_io_stat(&ts->clat_high_prio_stat[i]); + reset_io_stat(&ts->clat_prio_stat[i]); reset_io_stat(&ts->clat_stat[i]); reset_io_stat(&ts->slat_stat[i]); reset_io_stat(&ts->lat_stat[i]); @@ -2385,6 +2747,8 @@ void reset_io_stats(struct thread_data *td) for (j = 0; j < FIO_IO_U_PLAT_NR; j++) { ts->io_u_plat[i][j] = 0; + ts->io_u_plat_high_prio[i][j] = 0; + ts->io_u_plat_prio[i][j] = 0; if (!i) ts->io_u_sync_plat[j] = 0; } @@ -2412,7 +2776,7 @@ void reset_io_stats(struct thread_data *td) } static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir, - unsigned long elapsed, bool log_max) + unsigned long elapsed, bool log_max, uint8_t priority_bit) { /* * Note an entry in the log. Use the mean from the logged samples, @@ -2427,26 +2791,26 @@ static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir, else data.val = iolog->avg_window[ddir].mean.u.f + 0.50; - __add_log_sample(iolog, data, ddir, 0, elapsed, 0); + __add_log_sample(iolog, data, ddir, 0, elapsed, 0, priority_bit); } reset_io_stat(&iolog->avg_window[ddir]); } static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed, - bool log_max) + bool log_max, uint8_t priority_bit) { int ddir; for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) - __add_stat_to_log(iolog, ddir, elapsed, log_max); + __add_stat_to_log(iolog, ddir, elapsed, log_max, priority_bit); } static unsigned long add_log_sample(struct thread_data *td, struct io_log *iolog, union io_sample_data data, enum fio_ddir ddir, unsigned long long bs, - uint64_t offset) + uint64_t offset, uint8_t priority_bit) { unsigned long elapsed, this_window; @@ -2459,7 +2823,7 @@ static unsigned long add_log_sample(struct thread_data *td, * If no time averaging, just add the log sample. 
*/ if (!iolog->avg_msec) { - __add_log_sample(iolog, data, ddir, bs, elapsed, offset); + __add_log_sample(iolog, data, ddir, bs, elapsed, offset, priority_bit); return 0; } @@ -2483,7 +2847,7 @@ static unsigned long add_log_sample(struct thread_data *td, return diff; } - __add_stat_to_log(iolog, ddir, elapsed, td->o.log_max != 0); + _add_stat_to_log(iolog, elapsed, td->o.log_max != 0, priority_bit); iolog->avg_last[ddir] = elapsed - (this_window - iolog->avg_msec); return iolog->avg_msec; @@ -2496,18 +2860,19 @@ void finalize_logs(struct thread_data *td, bool unit_logs) elapsed = mtime_since_now(&td->epoch); if (td->clat_log && unit_logs) - _add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0); + _add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0, 0); if (td->slat_log && unit_logs) - _add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0); + _add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0, 0); if (td->lat_log && unit_logs) - _add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0); + _add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0, 0); if (td->bw_log && (unit_logs == per_unit_log(td->bw_log))) - _add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0); + _add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0, 0); if (td->iops_log && (unit_logs == per_unit_log(td->iops_log))) - _add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0); + _add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0, 0); } -void add_agg_sample(union io_sample_data data, enum fio_ddir ddir, unsigned long long bs) +void add_agg_sample(union io_sample_data data, enum fio_ddir ddir, unsigned long long bs, + uint8_t priority_bit) { struct io_log *iolog; @@ -2515,7 +2880,7 @@ void add_agg_sample(union io_sample_data data, enum fio_ddir ddir, unsigned long return; iolog = agg_io_log[ddir]; - __add_log_sample(iolog, data, ddir, bs, mtime_since_genesis(), 0); + __add_log_sample(iolog, data, ddir, bs, mtime_since_genesis(), 0, priority_bit); } void add_sync_clat_sample(struct thread_stat *ts, unsigned long long nsec) @@ -2528,17 +2893,23 @@ void add_sync_clat_sample(struct thread_stat *ts, unsigned long long nsec) } static void add_clat_percentile_sample(struct thread_stat *ts, - unsigned long long nsec, enum fio_ddir ddir) + unsigned long long nsec, enum fio_ddir ddir, uint8_t priority_bit) { unsigned int idx = plat_val_to_idx(nsec); assert(idx < FIO_IO_U_PLAT_NR); ts->io_u_plat[ddir][idx]++; + + if (!priority_bit) { + ts->io_u_plat_prio[ddir][idx]++; + } else { + ts->io_u_plat_high_prio[ddir][idx]++; + } } void add_clat_sample(struct thread_data *td, enum fio_ddir ddir, unsigned long long nsec, unsigned long long bs, - uint64_t offset) + uint64_t offset, uint8_t priority_bit) { const bool needs_lock = td_async_processing(td); unsigned long elapsed, this_window; @@ -2550,12 +2921,19 @@ void add_clat_sample(struct thread_data *td, enum fio_ddir ddir, add_stat_sample(&ts->clat_stat[ddir], nsec); + if (priority_bit) { + add_stat_sample(&ts->clat_high_prio_stat[ddir], nsec); + } else { + add_stat_sample(&ts->clat_prio_stat[ddir], nsec); + } + if (td->clat_log) add_log_sample(td, td->clat_log, sample_val(nsec), ddir, bs, - offset); + offset, priority_bit); - if (ts->clat_percentiles) - add_clat_percentile_sample(ts, nsec, ddir); + if (ts->clat_percentiles) { + add_clat_percentile_sample(ts, nsec, ddir, priority_bit); + } if (iolog && iolog->hist_msec) { struct io_hist *hw = &iolog->hist_window[ddir]; @@ -2565,7 +2943,7 @@ void add_clat_sample(struct thread_data *td, enum fio_ddir 
ddir, if (!hw->hist_last) hw->hist_last = elapsed; this_window = elapsed - hw->hist_last; - + if (this_window >= iolog->hist_msec) { uint64_t *io_u_plat; struct io_u_plat_entry *dst; @@ -2580,10 +2958,10 @@ void add_clat_sample(struct thread_data *td, enum fio_ddir ddir, io_u_plat = (uint64_t *) td->ts.io_u_plat[ddir]; dst = malloc(sizeof(struct io_u_plat_entry)); memcpy(&(dst->io_u_plat), io_u_plat, - FIO_IO_U_PLAT_NR * sizeof(unsigned int)); + FIO_IO_U_PLAT_NR * sizeof(uint64_t)); flist_add(&dst->list, &hw->list); __add_log_sample(iolog, sample_plat(dst), ddir, bs, - elapsed, offset); + elapsed, offset, priority_bit); /* * Update the last time we recorded as being now, minus @@ -2600,7 +2978,8 @@ void add_clat_sample(struct thread_data *td, enum fio_ddir ddir, } void add_slat_sample(struct thread_data *td, enum fio_ddir ddir, - unsigned long usec, unsigned long long bs, uint64_t offset) + unsigned long usec, unsigned long long bs, uint64_t offset, + uint8_t priority_bit) { const bool needs_lock = td_async_processing(td); struct thread_stat *ts = &td->ts; @@ -2614,7 +2993,8 @@ void add_slat_sample(struct thread_data *td, enum fio_ddir ddir, add_stat_sample(&ts->slat_stat[ddir], usec); if (td->slat_log) - add_log_sample(td, td->slat_log, sample_val(usec), ddir, bs, offset); + add_log_sample(td, td->slat_log, sample_val(usec), ddir, bs, offset, + priority_bit); if (needs_lock) __td_io_u_unlock(td); @@ -2622,7 +3002,7 @@ void add_slat_sample(struct thread_data *td, enum fio_ddir ddir, void add_lat_sample(struct thread_data *td, enum fio_ddir ddir, unsigned long long nsec, unsigned long long bs, - uint64_t offset) + uint64_t offset, uint8_t priority_bit) { const bool needs_lock = td_async_processing(td); struct thread_stat *ts = &td->ts; @@ -2637,10 +3017,10 @@ void add_lat_sample(struct thread_data *td, enum fio_ddir ddir, if (td->lat_log) add_log_sample(td, td->lat_log, sample_val(nsec), ddir, bs, - offset); + offset, priority_bit); if (ts->lat_percentiles) - add_clat_percentile_sample(ts, nsec, ddir); + add_clat_percentile_sample(ts, nsec, ddir, priority_bit); if (needs_lock) __td_io_u_unlock(td); @@ -2665,7 +3045,7 @@ void add_bw_sample(struct thread_data *td, struct io_u *io_u, if (td->bw_log) add_log_sample(td, td->bw_log, sample_val(rate), io_u->ddir, - bytes, io_u->offset); + bytes, io_u->offset, io_u_is_prio(io_u)); td->stat_io_bytes[io_u->ddir] = td->this_io_bytes[io_u->ddir]; @@ -2719,7 +3099,7 @@ static int __add_samples(struct thread_data *td, struct timespec *parent_tv, if (td->o.min_bs[ddir] == td->o.max_bs[ddir]) bs = td->o.min_bs[ddir]; - next = add_log_sample(td, log, sample_val(rate), ddir, bs, 0); + next = add_log_sample(td, log, sample_val(rate), ddir, bs, 0, 0); next_log = min(next_log, next); } @@ -2759,7 +3139,7 @@ void add_iops_sample(struct thread_data *td, struct io_u *io_u, if (td->iops_log) add_log_sample(td, td->iops_log, sample_val(1), io_u->ddir, - bytes, io_u->offset); + bytes, io_u->offset, io_u_is_prio(io_u)); td->stat_io_blocks[io_u->ddir] = td->this_io_blocks[io_u->ddir];
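
Note on the core bookkeeping the patch introduces: a per-I/O priority_bit is threaded through the sample paths (add_clat_sample(), add_log_sample(), add_clat_percentile_sample(), ...) so that completion latencies are accumulated into separate high- and low-priority stat buckets (clat_high_prio_stat / clat_prio_stat) and histograms (io_u_plat_high_prio / io_u_plat_prio), which the reporting code then turns into separate "high prio" and "low prio" percentile tables in both the normal and JSON output. The following is a minimal, self-contained sketch of that idea, not fio code: the log2 bucketing is a simplification of fio's plat_val_to_idx()/plat_idx_to_val() scheme, and the names prio_hist, add_sample() and percentile() are illustrative only.

/*
 * Illustrative sketch only -- not part of the patch.  It mimics the split
 * that add_clat_percentile_sample() performs in stat.c: every completion
 * latency is counted in one of two histograms, selected by a priority bit,
 * and percentiles are later read off each histogram independently.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NR_BINS 48	/* enough for latencies up to ~2^48 nsec */

struct prio_hist {
	uint64_t bins[2][NR_BINS];	/* [0] = low prio, [1] = high prio */
	uint64_t samples[2];
};

/* Simplified bucketing: bin index is roughly log2(nsec). */
static unsigned int val_to_idx(uint64_t nsec)
{
	unsigned int idx = 0;

	while (nsec > 1 && idx < NR_BINS - 1) {
		nsec >>= 1;
		idx++;
	}
	return idx;
}

/* Same shape as add_clat_percentile_sample(ts, nsec, ddir, priority_bit). */
static void add_sample(struct prio_hist *h, uint64_t nsec, int priority_bit)
{
	int prio = priority_bit ? 1 : 0;

	h->bins[prio][val_to_idx(nsec)]++;
	h->samples[prio]++;
}

/* Walk one histogram until the requested fraction of samples is covered. */
static uint64_t percentile(const uint64_t *bins, uint64_t samples, double pct)
{
	uint64_t sum = 0, thresh;
	unsigned int i;

	if (!samples)
		return 0;

	thresh = (uint64_t)(samples * pct / 100.0);
	if (!thresh)
		thresh = 1;

	for (i = 0; i < NR_BINS; i++) {
		sum += bins[i];
		if (sum >= thresh)
			break;
	}
	return 1ULL << (i + 1);	/* upper edge of the matched bucket */
}

int main(void)
{
	struct prio_hist h;
	uint64_t i;

	memset(&h, 0, sizeof(h));

	/* Fabricated workload: high-prio I/Os complete faster. */
	for (i = 0; i < 1000; i++) {
		add_sample(&h, 50000 + i * 10, 1);	/* ~50 usec, high prio */
		add_sample(&h, 900000 + i * 100, 0);	/* ~900 usec, low prio */
	}

	printf("high prio p99 <= %llu nsec (%llu samples)\n",
	       (unsigned long long)percentile(h.bins[1], h.samples[1], 99.0),
	       (unsigned long long)h.samples[1]);
	printf("low prio  p99 <= %llu nsec (%llu samples)\n",
	       (unsigned long long)percentile(h.bins[0], h.samples[0], 99.0),
	       (unsigned long long)h.samples[0]);
	return 0;
}

Keeping one histogram per priority class, rather than tagging raw samples, is what lets the patch print "high prio (x%) clat" and "low prio (y%) clat" percentiles in show_ddir_status() and emit the clat_prio / clat_low_prio JSON objects without re-scanning individual latency samples.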