With the aioring engine, we can get notified whether a buffered read was
a cache hit or whether it went to the media. Add that information to the
output stats for both normal and JSON output.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
dst->ss_bw_data[i] = le64_to_cpu(src->ss_bw_data[i]);
}
}
dst->ss_bw_data[i] = le64_to_cpu(src->ss_bw_data[i]);
}
}
+
+ dst->cachehit = le64_to_cpu(src->cachehit);
+ dst->cachemiss = le64_to_cpu(src->cachemiss);
}
static void convert_gs(struct group_run_stats *dst, struct group_run_stats *src)
}
static void convert_gs(struct group_run_stats *dst, struct group_run_stats *src)
#define IORING_SQ_NEED_WAKEUP (1 << 0)
#define IORING_SQ_NEED_WAKEUP (1 << 0)
+#define IOEV_RES2_CACHEHIT (1 << 0)
+
struct aio_sq_ring {
union {
struct {
struct aio_sq_ring {
union {
struct {
int queued;
int cq_ring_off;
int queued;
int cq_ring_off;
+
+ uint64_t cachehit;
+ uint64_t cachemiss;
};
struct aioring_options {
};
struct aioring_options {
+ if (io_u->ddir == DDIR_READ) {
+ if (ev->res2 & IOEV_RES2_CACHEHIT)
+ ld->cachehit++;
+ else
+ ld->cachemiss++;
+ }
+
struct aioring_data *ld = td->io_ops_data;
if (ld) {
struct aioring_data *ld = td->io_ops_data;
if (ld) {
+ td->ts.cachehit += ld->cachehit;
+ td->ts.cachemiss += ld->cachemiss;
+
/* Bump depth to match init depth */
td->o.iodepth++;
/* Bump depth to match init depth */
td->o.iodepth++;
p.ts.ss_deviation.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_deviation.u.f));
p.ts.ss_criterion.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_criterion.u.f));
p.ts.ss_deviation.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_deviation.u.f));
p.ts.ss_criterion.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_criterion.u.f));
+ p.ts.cachehit = cpu_to_le64(ts->cachehit);
+ p.ts.cachemiss = cpu_to_le64(ts->cachemiss);
+
convert_gs(&p.rs, rs);
dprint(FD_NET, "ts->ss_state = %d\n", ts->ss_state);
convert_gs(&p.rs, rs);
dprint(FD_NET, "ts->ss_state = %d\n", ts->ss_state);
FIO_SERVER_MAX_FRAGMENT_PDU = 1024,
FIO_SERVER_MAX_CMD_MB = 2048,
FIO_SERVER_MAX_FRAGMENT_PDU = 1024,
FIO_SERVER_MAX_CMD_MB = 2048,
unsigned long runt;
unsigned long long min, max, bw, iops;
double mean, dev;
unsigned long runt;
unsigned long long min, max, bw, iops;
double mean, dev;
- char *io_p, *bw_p, *bw_p_alt, *iops_p, *zbd_w_st = NULL;
+ char *io_p, *bw_p, *bw_p_alt, *iops_p, *post_st = NULL;
int i2p;
if (ddir_sync(ddir)) {
int i2p;
if (ddir_sync(ddir)) {
iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
iops_p = num2str(iops, ts->sig_figs, 1, 0, N2S_NONE);
if (ddir == DDIR_WRITE)
iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
iops_p = num2str(iops, ts->sig_figs, 1, 0, N2S_NONE);
if (ddir == DDIR_WRITE)
- zbd_w_st = zbd_write_status(ts);
+ post_st = zbd_write_status(ts);
+ else if (ddir == DDIR_READ && ts->cachehit && ts->cachemiss) {
+ uint64_t total;
+ double hit;
+
+ total = ts->cachehit + ts->cachemiss;
+ hit = (double) ts->cachehit / (double) total;
+ hit *= 100.0;
+ if (asprintf(&post_st, "; Cachehit=%0.2f%%", hit) < 0)
+ post_st = NULL;
+ }
log_buf(out, " %s: IOPS=%s, BW=%s (%s)(%s/%llumsec)%s\n",
rs->unified_rw_rep ? "mixed" : io_ddir_name(ddir),
iops_p, bw_p, bw_p_alt, io_p,
(unsigned long long) ts->runtime[ddir],
log_buf(out, " %s: IOPS=%s, BW=%s (%s)(%s/%llumsec)%s\n",
rs->unified_rw_rep ? "mixed" : io_ddir_name(ddir),
iops_p, bw_p, bw_p_alt, io_p,
(unsigned long long) ts->runtime[ddir],
free(io_p);
free(bw_p);
free(bw_p_alt);
free(io_p);
free(bw_p);
free(bw_p_alt);
json_object_add_value_float(dir_object, "iops_stddev", dev);
json_object_add_value_int(dir_object, "iops_samples",
(&ts->iops_stat[ddir])->samples);
json_object_add_value_float(dir_object, "iops_stddev", dev);
json_object_add_value_int(dir_object, "iops_samples",
(&ts->iops_stat[ddir])->samples);
+
+ if (ts->cachehit + ts->cachemiss) {
+ uint64_t total;
+ double hit;
+
+ total = ts->cachehit + ts->cachemiss;
+ hit = (double) ts->cachehit / (double) total;
+ hit *= 100.0;
+ json_object_add_value_float(dir_object, "cachehit", hit);
+ }
}
static void show_thread_status_terse_all(struct thread_stat *ts,
}
static void show_thread_status_terse_all(struct thread_stat *ts,
dst->total_submit += src->total_submit;
dst->total_complete += src->total_complete;
dst->nr_zone_resets += src->nr_zone_resets;
dst->total_submit += src->total_submit;
dst->total_complete += src->total_complete;
dst->nr_zone_resets += src->nr_zone_resets;
+ dst->cachehit += src->cachehit;
+ dst->cachemiss += src->cachemiss;
}
void init_group_run_stat(struct group_run_stats *gs)
}
void init_group_run_stat(struct group_run_stats *gs)
ts->total_submit = 0;
ts->total_complete = 0;
ts->nr_zone_resets = 0;
ts->total_submit = 0;
ts->total_complete = 0;
ts->nr_zone_resets = 0;
+ ts->cachehit = ts->cachemiss = 0;
}
static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir,
}
static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir,
uint64_t *ss_bw_data;
uint64_t pad5;
};
uint64_t *ss_bw_data;
uint64_t pad5;
};
+
+ uint64_t cachehit;
+ uint64_t cachemiss;
} __attribute__((packed));
struct jobs_eta {
} __attribute__((packed));
struct jobs_eta {