static int __setup_rate(struct thread_data *td, enum fio_ddir ddir)
{
        unsigned int bs = td->o.min_bs[ddir];
-       unsigned long long rate;
-       unsigned long ios_per_msec;
+       unsigned long long bytes_per_sec;

-       if (td->o.rate[ddir]) {
-               rate = td->o.rate[ddir];
-               ios_per_msec = (rate * 1000LL) / bs;
-       } else
-               ios_per_msec = td->o.rate_iops[ddir] * 1000UL;
+       if (td->o.rate[ddir])
+               bytes_per_sec = td->o.rate[ddir];
+       else
+               bytes_per_sec = td->o.rate_iops[ddir] * bs;

-       if (!ios_per_msec) {
+       if (!bytes_per_sec) {
                log_err("rate lower than supported\n");
                return -1;
        }

-       td->rate_usec_cycle[ddir] = 1000000000ULL / ios_per_msec;
+       td->rate_nsec_cycle[ddir] = 1000000000ULL / bytes_per_sec;
        td->rate_pending_usleep[ddir] = 0;
        return 0;
}
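
For reference, here is a minimal standalone sketch of the cycle math above, with fio's thread_data and option structures replaced by plain variables; the rate of 1,048,576 bytes/s (rate=1m) is an assumed example value, for which the cycle works out to 1000000000 / 1048576 = 953 ns per byte.

#include <stdio.h>

/* Standalone illustration of the __setup_rate() math; the fio
 * structures are replaced by plain variables, and the 1 MiB/s rate
 * is an assumed example value, not taken from the patch. */
int main(void)
{
        unsigned long long bytes_per_sec = 1048576ULL;  /* rate=1m */
        unsigned long long rate_nsec_cycle;

        if (!bytes_per_sec) {
                fprintf(stderr, "rate lower than supported\n");
                return 1;
        }

        /* Nanoseconds the job may spend per byte to stay at the rate */
        rate_nsec_cycle = 1000000000ULL / bytes_per_sec;
        printf("%llu ns per byte\n", rate_nsec_cycle);  /* prints 953 */
        return 0;
}

Storing the budget as nanoseconds per byte, rather than the old microseconds per I/O, lets the completion path compute the target elapsed time from byte counts alone, independent of block size.
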
if (!io_u->error) {
        unsigned int bytes = io_u->buflen - io_u->resid;
        const enum fio_ddir idx = io_u->ddir;
+       const enum fio_ddir odx = io_u->ddir ^ 1;
        int ret;

        td->io_blocks[idx]++;

        if (ramp_time_over(td)) {
                unsigned long uninitialized_var(lusec);
-               unsigned long uninitialized_var(rusec);

                if (!td->o.disable_clat || !td->o.disable_bw)
                        lusec = utime_since(&io_u->issue_time,
                                            &icd->time);
-               if (__should_check_rate(td, idx) ||
-                   __should_check_rate(td, idx ^ 1))
-                       rusec = utime_since(&io_u->start_time,
-                                           &icd->time);
                if (!td->o.disable_clat) {
                        add_clat_sample(td, idx, lusec, bytes);
                        io_u_mark_latency(td, lusec);
                }
                if (!td->o.disable_bw)
                        add_bw_sample(td, idx, bytes, &icd->time);
                if (__should_check_rate(td, idx)) {
-                       td->rate_pending_usleep[idx] +=
-                               (long) td->rate_usec_cycle[idx] - rusec;
+                       td->rate_pending_usleep[idx] =
+                               ((td->this_io_bytes[idx] *
+                                 td->rate_nsec_cycle[idx]) / 1000 -
+                                utime_since_now(&td->start));
                }
                if (__should_check_rate(td, idx ^ 1))
-                       td->rate_pending_usleep[idx ^ 1] -= rusec;
+                       td->rate_pending_usleep[odx] =
+                               ((td->this_io_bytes[odx] *
+                                 td->rate_nsec_cycle[odx]) / 1000 -
+                                utime_since_now(&td->start));
        }

        if (td_write(td) && idx == DDIR_WRITE &&
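
The change above replaces the old per-completion adjustment (+= rate_usec_cycle - rusec), which accumulates drift once several I/Os are in flight, with an absolute budget: the time the job should have taken for the bytes completed so far, minus the time it actually took. A standalone sketch of that formula with made-up numbers (4 MiB completed after 3.5 s of runtime, at the assumed rate=1m from the earlier example):

#include <stdio.h>

/* Standalone illustration of the pending-usleep formula above. All
 * values are assumed example numbers, not fio output. A positive
 * result means the job is ahead of its rate limit and must sleep;
 * a negative result means it is behind. */
int main(void)
{
        unsigned long long this_io_bytes = 4 * 1048576ULL; /* 4 MiB done */
        unsigned long long rate_nsec_cycle = 953;      /* from rate=1m */
        unsigned long long elapsed_usec = 3500000ULL;  /* 3.5 s running */
        long long pending_usleep;

        /* Target elapsed usec for the bytes done, minus actual elapsed */
        pending_usleep = (long long)(this_io_bytes * rate_nsec_cycle / 1000) -
                         (long long)elapsed_usec;
        printf("pending usleep: %lld usec\n", pending_usleep); /* 497171 */
        return 0;
}

Because the budget is restated from absolute totals on every completion, any error introduced by earlier sleeps is corrected on the next calculation rather than accumulating.
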