/*
* Rate state
*/
/*
 * NOTE(review): these fields presumably live inside struct thread_data
 * (struct header not visible in this fragment); arrays are indexed by
 * data direction -- TODO confirm 0 = read, 1 = write against fio_ddir.
 */
unsigned long long rate_bps[2];		/* target rate, bytes per second (set by __setup_rate) */
long rate_pending_usleep[2];		/* usecs to sleep to get back on the target rate; negative when behind */
unsigned long rate_bytes[2];
unsigned long rate_blocks[2];
/*
 * Compute the target byte rate for the given data direction and reset
 * the pending-sleep accounting.
 *
 * The rate comes either directly from the rate= option (bytes/sec) or,
 * failing that, from rate_iops= scaled by the minimum block size.
 *
 * Returns 0 on success, -1 if the configured options yield a zero rate.
 */
static int __setup_rate(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned int bs = td->o.min_bs[ddir];

	assert(ddir_rw(ddir));

	if (td->o.rate[ddir])
		td->rate_bps[ddir] = td->o.rate[ddir];
	else {
		/*
		 * Widen before multiplying: rate_iops * bs can exceed
		 * 32 bits for high IOPS targets with large block sizes.
		 */
		td->rate_bps[ddir] = (unsigned long long) td->o.rate_iops[ddir] * bs;
	}

	if (!td->rate_bps[ddir]) {
		log_err("rate lower than supported\n");
		return -1;
	}

	td->rate_pending_usleep[ddir] = 0;
	return 0;
}
add_iops_sample(td, idx, &icd->time);
}
+static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
+{
+ unsigned long long secs, remainder, bps, bytes;
+ bytes = td->this_io_bytes[ddir];
+ bps = td->rate_bps[ddir];
+ secs = bytes / bps;
+ remainder = bytes % bps;
+ return remainder * 1000000 / bps + secs * 1000000;
+}
+
/*
 * NOTE(review): partial diff hunk -- the body between the signature and
 * the rate checks (including the declarations of idx and odx, and the
 * rest of the completion handling) is elided from this view. idx is
 * presumably the I/O's data direction and odx the opposite direction --
 * TODO confirm against the full function.
 */
static void io_completed(struct thread_data *td, struct io_u *io_u,
 struct io_completion_data *icd)
{
 if (__should_check_rate(td, idx)) {
 /*
  * Sleep debt for this direction: expected elapsed usecs at the
  * target rate minus the actual elapsed time since start. The
  * patch replaces the nsec-cycle multiply (which could overflow
  * for large byte counts) with usec_for_io().
  */
 td->rate_pending_usleep[idx] =
- ((td->this_io_bytes[idx] *
- td->rate_nsec_cycle[idx]) / 1000 -
+ (usec_for_io(td, idx) -
 utime_since_now(&td->start));
 }
 /* Same accounting for the opposite direction (idx ^ 1 == odx). */
- if (__should_check_rate(td, idx ^ 1))
+ if (__should_check_rate(td, odx))
 td->rate_pending_usleep[odx] =
- ((td->this_io_bytes[odx] *
- td->rate_nsec_cycle[odx]) / 1000 -
+ (usec_for_io(td, odx) -
 utime_since_now(&td->start));
}