dst->total_err_count = le64_to_cpu(src->total_err_count);
dst->first_error = le32_to_cpu(src->first_error);
dst->kb_base = le32_to_cpu(src->kb_base);
+ dst->unit_base = le32_to_cpu(src->unit_base);
}
static void convert_gs(struct group_run_stats *dst, struct group_run_stats *src)
}
dst->kb_base = le32_to_cpu(src->kb_base);
+ dst->unit_base = le32_to_cpu(src->unit_base);
dst->groupid = le32_to_cpu(src->groupid);
dst->unified_rw_rep = le32_to_cpu(src->unified_rw_rep);
}
je->elapsed_sec = le64_to_cpu(je->elapsed_sec);
je->eta_sec = le64_to_cpu(je->eta_sec);
je->is_pow2 = le32_to_cpu(je->is_pow2);
+ je->unit_base = le32_to_cpu(je->unit_base);
}
static void sum_jobs_eta(struct jobs_eta *dst, struct jobs_eta *je)
.options = options,
.option_struct_size = sizeof(struct netio_options),
.flags = FIO_SYNCIO | FIO_DISKLESSIO | FIO_UNIDIR |
- FIO_PIPEIO,
+ FIO_PIPEIO | FIO_BIT_BASED,
};
static int str_hostname_cb(void *data, const char *input)
unified_rw_rep += td->o.unified_rw_rep;
if (is_power_of_2(td->o.kb_base))
je->is_pow2 = 1;
+ je->unit_base = td->o.unit_base;
if (td->o.bw_avg_time < bw_avg_time)
bw_avg_time = td->o.bw_avg_time;
if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
rate_str[ddir] = num2str(je->rate[ddir], 5,
- 1024, je->is_pow2, 8);
+ 1024, je->is_pow2, je->unit_base);
iops_str[ddir] = num2str(je->iops[ddir], 4, 1, 0, 0);
}
enum td_ddir td_ddir;
unsigned int rw_seq;
unsigned int kb_base;
+ unsigned int unit_base;
unsigned int ddir_seq_nr;
long ddir_seq_add;
unsigned int iodepth;
}
}
+ if (!o->unit_base) {
+ if (td->io_ops->flags & FIO_BIT_BASED)
+ o->unit_base = 1;
+ else
+ o->unit_base = 8;
+ }
+
#ifndef CONFIG_FDATASYNC
if (o->fdatasync_blocks) {
log_info("fio: this platform does not support fdatasync()"
FIO_PIPEIO = 1 << 7, /* input/output no seekable */
FIO_BARRIER = 1 << 8, /* engine supports barriers */
FIO_MEMALIGN = 1 << 9, /* engine wants aligned memory */
+ FIO_BIT_BASED = 1 << 10, /* engine uses a bit base (e.g. uses Kbit as opposed to KB) */
};
/*
return 0;
}
+static int unit_base_verify(struct fio_option *o, void *data)
+{
+	struct thread_data *td = data;
+
+	/*
+	 * Accepted values for unit_base:
+	 *   0 = default, pick based on engine
+	 *   1 = use bits
+	 *   8 = use bytes
+	 * Anything else is rejected with an error.
+	 */
+	switch (td->o.unit_base) {
+	case 0:
+	case 1:
+	case 8:
+		return 0;
+	default:
+		log_err("fio: unit_base set to nonsensical value: %u\n",
+			td->o.unit_base);
+		return 1;
+	}
+}
+
/*
* Map of job/command line options
*/
.def = "1024",
.help = "How many bytes per KB for reporting (1000 or 1024)",
},
+ {
+ .name = "unit_base",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(unit_base),
+ .verify = unit_base_verify,
+ .prio = 1,
+ .def = "0",
+ .help = "Bit multiple of result summary data (8 for byte, 1 for bit)",
+ },
{
.name = "lockfile",
.type = FIO_OPT_STR,
}
dst->kb_base = cpu_to_le32(src->kb_base);
+ dst->unit_base = cpu_to_le32(src->unit_base);
dst->groupid = cpu_to_le32(src->groupid);
dst->unified_rw_rep = cpu_to_le32(src->unified_rw_rep);
}
p.ts.total_err_count = cpu_to_le64(ts->total_err_count);
p.ts.first_error = cpu_to_le32(ts->first_error);
p.ts.kb_base = cpu_to_le32(ts->kb_base);
+ p.ts.unit_base = cpu_to_le32(ts->unit_base);
convert_gs(&p.rs, rs);
continue;
p1 = num2str(rs->io_kb[i], 6, rs->kb_base, i2p, 8);
- p2 = num2str(rs->agg[i], 6, rs->kb_base, i2p, 8);
- p3 = num2str(rs->min_bw[i], 6, rs->kb_base, i2p, 8);
- p4 = num2str(rs->max_bw[i], 6, rs->kb_base, i2p, 8);
+ p2 = num2str(rs->agg[i], 6, rs->kb_base, i2p, rs->unit_base);
+ p3 = num2str(rs->min_bw[i], 6, rs->kb_base, i2p, rs->unit_base);
+ p4 = num2str(rs->max_bw[i], 6, rs->kb_base, i2p, rs->unit_base);
log_info("%s: io=%s, aggrb=%s/s, minb=%s/s, maxb=%s/s,"
" mint=%llumsec, maxt=%llumsec\n",
bw = (1000 * ts->io_bytes[ddir]) / runt;
io_p = num2str(ts->io_bytes[ddir], 6, 1, i2p, 8);
- bw_p = num2str(bw, 6, 1, i2p, 8);
+ bw_p = num2str(bw, 6, 1, i2p, ts->unit_base);
iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
iops_p = num2str(iops, 6, 1, 0, 0);
struct thread_stat *threadstats, *ts;
int i, j, nr_ts, last_ts, idx;
int kb_base_warned = 0;
+ int unit_base_warned = 0;
struct json_object *root = NULL;
struct json_array *array = NULL;
ts->pid = td->pid;
ts->kb_base = td->o.kb_base;
+ ts->unit_base = td->o.unit_base;
ts->unified_rw_rep = td->o.unified_rw_rep;
} else if (ts->kb_base != td->o.kb_base && !kb_base_warned) {
log_info("fio: kb_base differs for jobs in group, using"
" %u as the base\n", ts->kb_base);
kb_base_warned = 1;
+ } else if (ts->unit_base != td->o.unit_base && !unit_base_warned) {
+ log_info("fio: unit_base differs for jobs in group, using"
+ " %u as the base\n", ts->unit_base);
+ unit_base_warned = 1;
}
ts->continue_on_error = td->o.continue_on_error;
ts = &threadstats[i];
rs = &runstats[ts->groupid];
rs->kb_base = ts->kb_base;
+ rs->unit_base = ts->unit_base;
rs->unified_rw_rep += ts->unified_rw_rep;
for (j = 0; j < DDIR_RWDIR_CNT; j++) {
uint64_t io_kb[DDIR_RWDIR_CNT];
uint64_t agg[DDIR_RWDIR_CNT];
uint32_t kb_base;
+ uint32_t unit_base;
uint32_t groupid;
uint32_t unified_rw_rep;
};
uint32_t first_error;
uint32_t kb_base;
+ uint32_t unit_base;
};
struct jobs_eta {
uint64_t elapsed_sec;
uint64_t eta_sec;
uint32_t is_pow2;
+ uint32_t unit_base;
/*
* Network 'copy' of run_str[]