#include "fio.h"
static struct timeval genesis;
+static unsigned long ns_granularity;
/*
 * Return the number of microseconds between timevals s and e.
 * Returns 0 if time appears to have gone backwards (e before s).
 */
unsigned long long utime_since(struct timeval *s, struct timeval *e)
{
	long sec, usec;
	unsigned long long ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	/*
	 * NOTE(review): this normalization was elided in the diff hunk;
	 * reconstructed to match the visible closing lines — verify.
	 */
	if (sec > 0 && usec < 0) {
		/* borrow one second into the usec component */
		sec--;
		usec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	ret = sec * 1000000ULL + usec;

	return ret;
}
-unsigned long utime_since_now(struct timeval *s)
+unsigned long long utime_since_now(struct timeval *s)
{
struct timeval t;
unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
- long sec, usec;
+ long sec, usec, ret;
sec = e->tv_sec - s->tv_sec;
usec = e->tv_usec - s->tv_usec;
usec += 1000000;
}
- sec *= (double) 1000;
- usec /= (double) 1000;
+ sec *= 1000UL;
+ usec /= 1000UL;
+ ret = sec + usec;
- return sec + usec;
+ /*
+ * time warp bug on some kernels?
+ */
+ if (ret < 0)
+ ret = 0;
+
+ return ret;
}
unsigned long mtime_since_now(struct timeval *s)
/*
* busy looping version for the last few usec
*/
-void __usec_sleep(unsigned int usec)
+void usec_spin(unsigned int usec)
{
struct timeval start;
void usec_sleep(struct thread_data *td, unsigned long usec)
{
- struct timespec req, rem;
-
- req.tv_sec = usec / 1000000;
- req.tv_nsec = usec * 1000 - req.tv_sec * 1000000;
+ struct timespec req;
+ struct timeval tv;
do {
- if (usec < 5000) {
- __usec_sleep(usec);
+ unsigned long ts = usec;
+
+ if (usec < ns_granularity) {
+ usec_spin(usec);
break;
}
- rem.tv_sec = rem.tv_nsec = 0;
- if (nanosleep(&req, &rem) < 0)
- break;
+ ts = usec - ns_granularity;
+
+ if (ts >= 1000000) {
+ req.tv_sec = ts / 1000000;
+ ts -= 1000000 * req.tv_sec;
+ } else
+ req.tv_sec = 0;
- if ((rem.tv_sec + rem.tv_nsec) == 0)
+ req.tv_nsec = ts * 1000;
+ fio_gettime(&tv, NULL);
+
+ if (nanosleep(&req, NULL) < 0)
break;
- req.tv_nsec = rem.tv_nsec;
- req.tv_sec = rem.tv_sec;
+ ts = utime_since_now(&tv);
+ if (ts >= usec)
+ break;
- usec = rem.tv_sec * 1000000 + rem.tv_nsec / 1000;
+ usec -= ts;
} while (!td->terminate);
}
-void rate_throttle(struct thread_data *td, unsigned long time_spent,
- unsigned int bytes, int ddir)
+unsigned long mtime_since_genesis(void)
{
- unsigned long usec_cycle;
-
- if (!td->rate)
- return;
+ return mtime_since_now(&genesis);
+}
- usec_cycle = td->rate_usec_cycle * (bytes / td->min_bs[ddir]);
+int in_ramp_time(struct thread_data *td)
+{
+ return td->o.ramp_time && !td->ramp_time_over;
+}
- if (time_spent < usec_cycle) {
- unsigned long s = usec_cycle - time_spent;
+int ramp_time_over(struct thread_data *td)
+{
+ struct timeval tv;
- td->rate_pending_usleep += s;
- if (td->rate_pending_usleep >= 100000) {
- usec_sleep(td, td->rate_pending_usleep);
- td->rate_pending_usleep = 0;
- }
- } else {
- long overtime = time_spent - usec_cycle;
+ if (!td->o.ramp_time || td->ramp_time_over)
+ return 1;
- td->rate_pending_usleep -= overtime;
+ fio_gettime(&tv, NULL);
+ if (mtime_since(&td->epoch, &tv) >= td->o.ramp_time * 1000) {
+ td->ramp_time_over = 1;
+ reset_all_stats(td);
+ td_set_runstate(td, TD_RAMP);
+ return 1;
}
+
+ return 0;
}
-unsigned long mtime_since_genesis(void)
+static void fio_init time_init(void)
{
- return mtime_since_now(&genesis);
+ int i;
+
+ /*
+ * Check the granularity of the nanosleep function
+ */
+ for (i = 0; i < 10; i++) {
+ struct timeval tv;
+ struct timespec ts;
+ unsigned long elapsed;
+
+ fio_gettime(&tv, NULL);
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1000;
+
+ nanosleep(&ts, NULL);
+ elapsed = utime_since_now(&tv);
+
+ if (elapsed > ns_granularity)
+ ns_granularity = elapsed;
+ }
}
-static void fio_init time_init(void)
+void set_genesis_time(void)
{
fio_gettime(&genesis, NULL);
}