#include "fio.h"
-static struct timeval genesis;
+static struct timespec genesis;
static unsigned long ns_granularity;
-unsigned long long utime_since(struct timeval *s, struct timeval *e)
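+/*
+ * Add 'msec' milliseconds to 'ts', carrying any overflow of tv_nsec
+ * into tv_sec so tv_nsec stays below 1000000000. Illustrative example
+ * (values are made up): ts = { .tv_sec = 1, .tv_nsec = 900000000 }
+ * plus msec = 250 yields { .tv_sec = 2, .tv_nsec = 150000000 }.
+ */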
+void timespec_add_msec(struct timespec *ts, unsigned int msec)
{
- long sec, usec;
- unsigned long long ret;
-
- sec = e->tv_sec - s->tv_sec;
- usec = e->tv_usec - s->tv_usec;
- if (sec > 0 && usec < 0) {
- sec--;
- usec += 1000000;
- }
-
- /*
- * time warp bug on some kernels?
- */
- if (sec < 0 || (sec == 0 && usec < 0))
- return 0;
-
- ret = sec * 1000000ULL + usec;
-
- return ret;
-}
+ uint64_t adj_nsec = 1000000ULL * msec;
-unsigned long long utime_since_now(struct timeval *s)
-{
- struct timeval t;
-
- fio_gettime(&t, NULL);
- return utime_since(s, &t);
-}
+ ts->tv_nsec += adj_nsec;
+ if (adj_nsec >= 1000000000) {
+ uint64_t adj_sec = adj_nsec / 1000000000;
-unsigned long mtime_since(struct timeval *s, struct timeval *e)
-{
- long sec, usec, ret;
-
- sec = e->tv_sec - s->tv_sec;
- usec = e->tv_usec - s->tv_usec;
- if (sec > 0 && usec < 0) {
- sec--;
- usec += 1000000;
+ ts->tv_nsec -= adj_sec * 1000000000;
+ ts->tv_sec += adj_sec;
+ }
+ if (ts->tv_nsec >= 1000000000) {
+ ts->tv_nsec -= 1000000000;
+ ts->tv_sec++;
}
-
- sec *= 1000UL;
- usec /= 1000UL;
- ret = sec + usec;
-
- /*
- * time warp bug on some kernels?
- */
- if (ret < 0)
- ret = 0;
-
- return ret;
-}
-
-unsigned long mtime_since_now(struct timeval *s)
-{
- struct timeval t;
- void *p = __builtin_return_address(0);
-
- fio_gettime(&t, p);
- return mtime_since(s, &t);
-}
-
-unsigned long time_since_now(struct timeval *s)
-{
- return mtime_since_now(s) / 1000;
}
/*
* busy looping version for the last few usec
*/
-void usec_spin(unsigned int usec)
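+/*
+ * Returns the time actually spent spinning, in usec, so the caller
+ * can account for it (usec_sleep() below adds it to its total).
+ */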
+uint64_t usec_spin(unsigned int usec)
{
- struct timeval start;
+ struct timespec start;
+ uint64_t t;
fio_gettime(&start, NULL);
- while (utime_since_now(&start) < usec)
+ while ((t = utime_since_now(&start)) < usec)
nop;
+
+ return t;
}
-void usec_sleep(struct thread_data *td, unsigned long usec)
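+/*
+ * Sleep for roughly 'usec' microseconds: busy-spin for requests shorter
+ * than the measured sleep granularity, otherwise sleep in chunks of at
+ * most one second until the time is up or the job is told to terminate.
+ * Returns the time actually spent, in usec.
+ */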
+uint64_t usec_sleep(struct thread_data *td, unsigned long usec)
{
struct timespec req;
- struct timeval tv;
+ struct timespec tv;
+ uint64_t t = 0;
do {
unsigned long ts = usec;
if (usec < ns_granularity) {
- usec_spin(usec);
+ t += usec_spin(usec);
break;
}
if (ts >= 1000000) {
req.tv_sec = ts / 1000000;
ts -= 1000000 * req.tv_sec;
+ /*
+ * Limit sleep to ~1 second at most, otherwise we
+ * don't notice when someone signals the job to
+ * exit manually.
+ */
+ if (req.tv_sec > 1)
+ req.tv_sec = 1;
} else
req.tv_sec = 0;
break;
ts = utime_since_now(&tv);
+ t += ts;
if (ts >= usec)
break;
usec -= ts;
} while (!td->terminate);
+
+ return t;
}
-void rate_throttle(struct thread_data *td, unsigned long time_spent,
- unsigned int bytes)
+uint64_t time_since_genesis(void)
{
- unsigned long usec_cycle;
- unsigned int bs;
-
- if (!td->o.rate && !td->o.rate_iops)
- return;
-
- if (td_rw(td))
- bs = td->o.rw_min_bs;
- else if (td_read(td))
- bs = td->o.min_bs[DDIR_READ];
- else
- bs = td->o.min_bs[DDIR_WRITE];
-
- usec_cycle = td->rate_usec_cycle * (bytes / bs);
-
- if (time_spent < usec_cycle) {
- unsigned long s = usec_cycle - time_spent;
-
- td->rate_pending_usleep += s;
-
- if (td->rate_pending_usleep >= 100000) {
- struct timeval t;
-
- fio_gettime(&t, NULL);
- usec_sleep(td, td->rate_pending_usleep);
- td->rate_pending_usleep -= utime_since_now(&t);
- }
- } else {
- long overtime = time_spent - usec_cycle;
-
- td->rate_pending_usleep -= overtime;
- }
+ return time_since_now(&genesis);
}
-unsigned long mtime_since_genesis(void)
+uint64_t mtime_since_genesis(void)
{
return mtime_since_now(&genesis);
}
-int in_ramp_time(struct thread_data *td)
+uint64_t utime_since_genesis(void)
+{
+ return utime_since_now(&genesis);
+}
+
+bool in_ramp_time(struct thread_data *td)
{
return td->o.ramp_time && !td->ramp_time_over;
}
-int ramp_time_over(struct thread_data *td)
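+/*
+ * Propagate the end of the ramp period to the parent thread: reset its
+ * stats and move it to TD_RAMP. Returns true if a parent was updated.
+ */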
+static bool parent_update_ramp(struct thread_data *td)
{
- struct timeval tv;
+ struct thread_data *parent = td->parent;
+ if (!parent || parent->ramp_time_over)
+ return false;
+
+ reset_all_stats(parent);
+ parent->ramp_time_over = true;
+ td_set_runstate(parent, TD_RAMP);
+ return true;
+}
+
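+/*
+ * Returns true once the ramp period (td->o.ramp_time, compared in usec
+ * here) has elapsed since the job's epoch; on that transition the
+ * accumulated stats are reset so they only reflect post-ramp I/O.
+ */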
+bool ramp_time_over(struct thread_data *td)
+{
if (!td->o.ramp_time || td->ramp_time_over)
- return 1;
+ return true;
- fio_gettime(&tv, NULL);
- if (mtime_since(&td->epoch, &tv) >= td->o.ramp_time * 1000) {
- td->ramp_time_over = 1;
+ if (utime_since_now(&td->epoch) >= td->o.ramp_time) {
+ td->ramp_time_over = true;
reset_all_stats(td);
+ reset_io_stats(td);
td_set_runstate(td, TD_RAMP);
- return 1;
+
+ /*
+ * If we have a parent, the parent isn't doing IO. Hence
+ * the parent never enters do_io(), which will switch us
+ * from RAMP -> RUNNING. Do this manually here.
+ */
+ if (parent_update_ramp(td))
+ td_set_runstate(td, TD_RUNNING);
+
+ return true;
}
- return 0;
+ return false;
}
-static void fio_init time_init(void)
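+/*
+ * One-time setup: initialize the clock source and probe how coarse
+ * short sleeps are, so usec_sleep() knows below which point it should
+ * spin instead (ns_granularity).
+ */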
+void fio_time_init(void)
{
int i;
+ fio_clock_init();
+
/*
* Check the granularity of the nanosleep function
*/
for (i = 0; i < 10; i++) {
- struct timeval tv;
- struct timespec ts;
+ struct timespec tv, ts;
unsigned long elapsed;
fio_gettime(&tv, NULL);
fio_gettime(&genesis, NULL);
}
-void fill_start_time(struct timeval *t)
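+/*
+ * Record the job's epoch. If log_alternate_epoch is set, also store the
+ * requested clock's current time, converted to milliseconds, in
+ * td->alternate_epoch. Example of the conversion (made-up values):
+ * tv_sec = 5, tv_nsec = 250000000 gives 5 * 1000 + 250 = 5250 msec.
+ */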
+void set_epoch_time(struct thread_data *td, int log_alternate_epoch, clockid_t clock_id)
+{
+ fio_gettime(&td->epoch, NULL);
+ if (log_alternate_epoch) {
+ struct timespec ts;
+ clock_gettime(clock_id, &ts);
+ td->alternate_epoch = (unsigned long long)(ts.tv_sec) * 1000 +
+ (unsigned long long)(ts.tv_nsec) / 1000000;
+ }
+}
+
+void fill_start_time(struct timespec *t)
{
memcpy(t, &genesis, sizeof(genesis));
}