For async engines, we look only at completions when accounting issued I/O.
But with a high queue depth we could have a bunch of I/O in flight, causing
us to issue more than we should.
Signed-off-by: Jens Axboe <axboe@fb.com>
unsigned long long bytes, limit;
if (td_rw(td))
unsigned long long bytes, limit;
if (td_rw(td))
- bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE];
+ bytes = td->io_issue_bytes[DDIR_READ] + td->io_issue_bytes[DDIR_WRITE];
- bytes = td->this_io_bytes[DDIR_WRITE];
+ bytes = td->io_issue_bytes[DDIR_WRITE];
- bytes = td->this_io_bytes[DDIR_READ];
+ bytes = td->io_issue_bytes[DDIR_READ];
- bytes = td->this_io_bytes[DDIR_TRIM];
+ bytes = td->io_issue_bytes[DDIR_TRIM];
if (td->o.io_limit)
limit = td->o.io_limit;
if (td->o.io_limit)
limit = td->o.io_limit;
uint64_t total_io_size;
uint64_t fill_device_size;
uint64_t total_io_size;
uint64_t fill_device_size;
uint64_t io_issues[DDIR_RWDIR_CNT];
uint64_t io_issues[DDIR_RWDIR_CNT];
+ uint64_t io_issue_bytes[DDIR_RWDIR_CNT];
+
+ /*
+ * Completions
+ */
uint64_t io_blocks[DDIR_RWDIR_CNT];
uint64_t this_io_blocks[DDIR_RWDIR_CNT];
uint64_t io_bytes[DDIR_RWDIR_CNT];
uint64_t io_blocks[DDIR_RWDIR_CNT];
uint64_t this_io_blocks[DDIR_RWDIR_CNT];
uint64_t io_bytes[DDIR_RWDIR_CNT];
sizeof(struct timeval));
}
sizeof(struct timeval));
}
- if (ddir_rw(acct_ddir(io_u)))
+ if (ddir_rw(acct_ddir(io_u))) {
td->io_issues[acct_ddir(io_u)]++;
td->io_issues[acct_ddir(io_u)]++;
+ td->io_issue_bytes[acct_ddir(io_u)] += io_u->xfer_buflen;
+ }
ret = td->io_ops->queue(td, io_u);
ret = td->io_ops->queue(td, io_u);