static struct timeval genesis;
static unsigned long ns_granularity;
+/*
+ * Advance 'tv' by 'msec' milliseconds, keeping tv_usec normalized to
+ * [0, 1000000). The single if-subtract version only carried one second,
+ * so any msec >= 2000 left tv_usec out of range; and 1000 * msec could
+ * overflow unsigned int for very large msec. Carry the full number of
+ * seconds in 64-bit arithmetic instead.
+ */
+void timeval_add_msec(struct timeval *tv, unsigned int msec)
+{
+ unsigned long long adj_usec = 1000ULL * msec;
+
+ tv->tv_sec += adj_usec / 1000000;
+ tv->tv_usec += adj_usec % 1000000;
+ if (tv->tv_usec >= 1000000) {
+ tv->tv_usec -= 1000000;
+ tv->tv_sec++;
+ }
+}
+
/*
* busy looping version for the last few usec
*/
-void usec_spin(unsigned int usec)
+uint64_t usec_spin(unsigned int usec)
{
struct timeval start;
+ uint64_t t;
fio_gettime(&start, NULL);
+ /* 't' captures the elapsed usec in the loop condition, so it is
+ * always assigned at least once before being returned */
- while (utime_since_now(&start) < usec)
+ while ((t = utime_since_now(&start)) < usec)
nop;
+
+ /* report the time actually spent spinning (>= usec) to the caller */
+ return t;
}
-void usec_sleep(struct thread_data *td, unsigned long usec)
+uint64_t usec_sleep(struct thread_data *td, unsigned long usec)
{
struct timespec req;
struct timeval tv;
+ /* accumulated usec actually waited; now returned to the caller */
+ uint64_t t = 0;
do {
unsigned long ts = usec;
+ /* requests below ns_granularity: busy-spin rather than sleeping */
if (usec < ns_granularity) {
- usec_spin(usec);
+ t += usec_spin(usec);
break;
}
break;
ts = utime_since_now(&tv);
+ /* count the time the sleep consumed, even if it overshot */
+ t += ts;
if (ts >= usec)
break;
usec -= ts;
} while (!td->terminate);
+
+ return t;
+}
+
+/* elapsed time since the global 'genesis' timestamp; units follow
+ * time_since_now() — presumably whole seconds, TODO confirm upstream */
+uint64_t time_since_genesis(void)
+{
+ return time_since_now(&genesis);
}
+/* NOTE(review): name suggests milliseconds, but the visible body calls
+ * utime_since_now(); surrounding lines appear elided — verify upstream */
uint64_t mtime_since_genesis(void)
return utime_since_now(&genesis);
}
-int in_ramp_time(struct thread_data *td)
+/* true while a ramp period is configured and has not yet completed */
+bool in_ramp_time(struct thread_data *td)
{
return td->o.ramp_time && !td->ramp_time_over;
}
-int ramp_time_over(struct thread_data *td)
+/*
+ * Propagate ramp completion to this thread's parent: reset the parent's
+ * stats once and move it to the TD_RAMP run state. No-op when there is
+ * no parent or its ramp already ended (so the reset happens only once).
+ */
+static void parent_update_ramp(struct thread_data *td)
+{
+ struct thread_data *parent = td->parent;
+
+ if (!parent || parent->ramp_time_over)
+ return;
+
+ reset_all_stats(parent);
+ parent->ramp_time_over = 1;
+ td_set_runstate(parent, TD_RAMP);
+}
+
+/*
+ * Check whether the ramp period has elapsed. On the first time it trips,
+ * latches ramp_time_over, resets this thread's stats, enters TD_RAMP,
+ * and propagates the transition to the parent thread.
+ */
+bool ramp_time_over(struct thread_data *td)
{
struct timeval tv;
if (!td->o.ramp_time || td->ramp_time_over)
- return 1;
+ return true;
fio_gettime(&tv, NULL);
+ /* o.ramp_time is compared against usec since epoch — presumably
+ * stored in usec; confirm against option parsing */
if (utime_since(&td->epoch, &tv) >= td->o.ramp_time) {
td->ramp_time_over = 1;
+ /* drop ramp-period samples so they don't pollute the results */
reset_all_stats(td);
td_set_runstate(td, TD_RAMP);
- return 1;
+ parent_update_ramp(td);
+ return true;
}
- return 0;
+ return false;
}
void fio_time_init(void)