static struct timeval genesis;
static unsigned long ns_granularity;
/*
 * Microseconds elapsed between two timevals: *s (start) and *e (end).
 *
 * Returns the difference in usecs as an unsigned long long so large
 * elapsed times do not overflow a 32-bit long. If the clock appears to
 * have gone backwards (time warp bug on some kernels), returns 0 rather
 * than a huge wrapped value.
 */
unsigned long long utime_since(struct timeval *s, struct timeval *e)
{
	long sec, usec;
	unsigned long long ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		/* borrow one second into the usec field */
		sec--;
		usec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	/* 1000000ULL forces the multiply into 64 bits */
	ret = sec * 1000000ULL + usec;

	return ret;
}
-unsigned long utime_since_now(struct timeval *s)
+unsigned long long utime_since_now(struct timeval *s)
{
struct timeval t;
/*
 * Milliseconds elapsed between two timevals: *s (start) and *e (end).
 *
 * Uses integer arithmetic throughout (the old float-cast multiply and
 * divide were pointless for whole-unit scaling). A negative result —
 * time warp bug on some kernels — is clamped to 0.
 */
unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
	long sec, usec, ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		/* borrow one second into the usec field */
		sec--;
		usec += 1000000;
	}

	sec *= 1000UL;		/* seconds -> msecs */
	usec /= 1000UL;		/* usecs -> msecs */
	ret = sec + usec;

	/*
	 * time warp bug on some kernels?
	 */
	if (ret < 0)
		ret = 0;

	return ret;
}
unsigned long mtime_since_now(struct timeval *s)
unsigned long usec_cycle;
unsigned int bs;
- if (!td->rate)
+ if (!td->o.rate && !td->o.rate_iops)
return;
if (td_rw(td))
- bs = td->rw_min_bs;
+ bs = td->o.rw_min_bs;
else if (td_read(td))
- bs = td->min_bs[DDIR_READ];
+ bs = td->o.min_bs[DDIR_READ];
else
- bs = td->min_bs[DDIR_WRITE];
+ bs = td->o.min_bs[DDIR_WRITE];
usec_cycle = td->rate_usec_cycle * (bytes / bs);
unsigned long s = usec_cycle - time_spent;
td->rate_pending_usleep += s;
+
if (td->rate_pending_usleep >= 100000) {
+ struct timeval t;
+
+ fio_gettime(&t, NULL);
usec_sleep(td, td->rate_pending_usleep);
- td->rate_pending_usleep = 0;
+ td->rate_pending_usleep -= utime_since_now(&t);
}
} else {
long overtime = time_spent - usec_cycle;