static struct timeval genesis;
static unsigned long ns_granularity;
/*
 * Microseconds elapsed from *s to *e. A negative (backwards) interval
 * is clamped to 0.
 */
unsigned long long utime_since(struct timeval *s, struct timeval *e)
{
	long dsec = e->tv_sec - s->tv_sec;
	long dusec = e->tv_usec - s->tv_usec;

	/* borrow a second when the usec delta underflows */
	if (dsec > 0 && dusec < 0) {
		dsec--;
		dusec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (dsec < 0 || (dsec == 0 && dusec < 0))
		return 0;

	return dsec * 1000000ULL + dusec;
}
-
-unsigned long long utime_since_now(struct timeval *s)
-{
- struct timeval t;
-
- fio_gettime(&t, NULL);
- return utime_since(s, &t);
-}
-
/*
 * Milliseconds elapsed from *s to *e. A negative (backwards) interval
 * is clamped to 0.
 *
 * Fix: the original did `sec *= 1000UL` and summed in a plain `long`,
 * which is signed overflow (undefined behavior) on 32-bit longs once
 * the interval exceeds ~24.8 days. Do the arithmetic in 64 bits; the
 * return type and truncation semantics are unchanged.
 */
unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
	long long sec, usec, ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	/* borrow a second when the usec delta underflows */
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	/* same truncating sec*1000 + usec/1000 as before, just 64-bit */
	ret = sec * 1000 + usec / 1000;

	/*
	 * time warp bug on some kernels?
	 */
	if (ret < 0)
		ret = 0;

	return ret;
}
-
/*
 * Milliseconds elapsed from *s until now.
 */
unsigned long mtime_since_now(struct timeval *s)
{
	/* caller address handed to fio_gettime — presumably for its
	 * internal bookkeeping; confirm against fio_gettime */
	void *caller = __builtin_return_address(0);
	struct timeval now;

	fio_gettime(&now, caller);
	return mtime_since(s, &now);
}
-
/*
 * Whole seconds elapsed from *s until now.
 */
unsigned long time_since_now(struct timeval *s)
{
	unsigned long msec = mtime_since_now(s);

	return msec / 1000;
}
-
/*
* busy looping version for the last few usec
*/
} while (!td->terminate);
}
-long rate_throttle(struct thread_data *td, unsigned long time_spent,
- unsigned long bytes, enum fio_ddir ddir)
+uint64_t mtime_since_genesis(void)
{
- unsigned int bs = td->o.min_bs[ddir];
- unsigned long usec_cycle;
-
- if (!td->o.rate[ddir] && !td->o.rate_iops[ddir])
- return 0;
-
- usec_cycle = td->rate_usec_cycle[ddir] * (bytes / bs);
-
- if (time_spent < usec_cycle) {
- unsigned long s = usec_cycle - time_spent;
-
- td->rate_pending_usleep[ddir] += s;
- } else {
- long overtime = time_spent - usec_cycle;
-
- td->rate_pending_usleep[ddir] -= overtime;
- }
-
- return td->rate_pending_usleep[ddir];
+ return mtime_since_now(&genesis);
}
-unsigned long mtime_since_genesis(void)
+uint64_t utime_since_genesis(void)
{
- return mtime_since_now(&genesis);
+ return utime_since_now(&genesis);
}
int in_ramp_time(struct thread_data *td)
return 1;
fio_gettime(&tv, NULL);
- if (mtime_since(&td->epoch, &tv) >= td->o.ramp_time * 1000) {
+ if (utime_since(&td->epoch, &tv) >= td->o.ramp_time) {
td->ramp_time_over = 1;
reset_all_stats(td);
td_set_runstate(td, TD_RAMP);
return 0;
}
-static void fio_init time_init(void)
+void fio_time_init(void)
{
int i;
+ fio_clock_init();
+
/*
* Check the granularity of the nanosleep function
*/