/* Time at which the run started; set by set_genesis_time() below. */
6 static struct timeval genesis;
/* Worst-case nanosleep() overshoot in usec, measured in fio_time_init(). */
7 static unsigned long ns_granularity;
/*
 * Advance *tv by 'msec' milliseconds, keeping tv_usec normalized to
 * [0, 1000000).
 *
 * The previous version subtracted 1000000 from tv_usec at most once,
 * which left tv_usec un-normalized whenever msec >= 1000 (the carry can
 * exceed one full second). Carry whole seconds first, then handle the
 * possible single-second overflow from the addition itself.
 */
void timeval_add_msec(struct timeval *tv, unsigned int msec)
{
	unsigned long adj_usec = 1000UL * msec;

	tv->tv_usec += adj_usec;
	if (adj_usec >= 1000000) {
		unsigned long adj_sec = adj_usec / 1000000;

		/* carry the whole seconds contained in the increment */
		tv->tv_usec -= adj_sec * 1000000;
		tv->tv_sec += adj_sec;
	}
	if (tv->tv_usec >= 1000000) {
		tv->tv_usec -= 1000000;
		tv->tv_sec++;
	}
}
19 * busy looping version for the last few usec
21 uint64_t usec_spin(unsigned int usec)
/* Stamp the start time, then busy-wait until 'usec' microseconds elapse. */
26 fio_gettime(&start, NULL);
/* NOTE(review): utime_since_now() appears to return elapsed usec — confirm. */
27 while ((t = utime_since_now(&start)) < usec)
33 uint64_t usec_sleep(struct thread_data *td, unsigned long usec)
/* Time left to sleep this iteration, in usec. */
40 unsigned long ts = usec;
/* Requests shorter than the measured nanosleep() overshoot aren't worth
 * a real sleep. */
42 if (usec < ns_granularity) {
/* Sleep for the request minus the expected overshoot. */
47 ts = usec - ns_granularity;
/* Split remaining usec into whole seconds plus leftover for the timespec. */
50 req.tv_sec = ts / 1000000;
51 ts -= 1000000 * req.tv_sec;
55 req.tv_nsec = ts * 1000;
/* Stamp the time before sleeping so the actual slept time can be measured. */
56 fio_gettime(&tv, NULL);
/* Bail out if nanosleep() fails or is interrupted by a signal. */
58 if (nanosleep(&req, NULL) < 0)
/* How long did we actually sleep? */
61 ts = utime_since_now(&tv);
/* Keep sleeping until satisfied, unless the thread is being terminated. */
67 } while (!td->terminate);
/* Seconds-scale elapsed time since the run's genesis timestamp. */
72 uint64_t time_since_genesis(void)
74 return time_since_now(&genesis);
/* Milliseconds elapsed since the run's genesis timestamp. */
77 uint64_t mtime_since_genesis(void)
79 return mtime_since_now(&genesis);
/* Microseconds elapsed since the run's genesis timestamp. */
82 uint64_t utime_since_genesis(void)
84 return utime_since_now(&genesis);
/* True while a configured ramp period is still in progress for 'td'. */
87 bool in_ramp_time(struct thread_data *td)
89 return td->o.ramp_time && !td->ramp_time_over;
/* Propagate ramp completion from a child thread to its parent. */
92 static void parent_update_ramp(struct thread_data *td)
94 struct thread_data *parent = td->parent;
/* Nothing to do without a parent, or if its ramp already completed. */
96 if (!parent || parent->ramp_time_over)
/* Ramp done: drop stats gathered during ramp and mark the parent ramped. */
99 reset_all_stats(parent);
100 parent->ramp_time_over = 1;
101 td_set_runstate(parent, TD_RAMP);
/* Check (and latch) whether 'td' has finished its configured ramp period. */
104 bool ramp_time_over(struct thread_data *td)
/* No ramp configured, or it has already finished — nothing to check. */
108 if (!td->o.ramp_time || td->ramp_time_over)
111 fio_gettime(&tv, NULL);
/* Ramp ends once ramp_time (usec scale, per utime_since) has elapsed
 * since the thread's epoch. */
112 if (utime_since(&td->epoch, &tv) >= td->o.ramp_time) {
113 td->ramp_time_over = 1;
115 td_set_runstate(td, TD_RAMP);
/* Let the parent thread know its child's ramp is complete. */
116 parent_update_ramp(td);
/* One-time timing setup: calibrate how much nanosleep() overshoots. */
123 void fio_time_init(void)
130 * Check the granularity of the nanosleep function
/* Sample a short nanosleep() several times and remember the worst case. */
132 for (i = 0; i < 10; i++) {
135 unsigned long elapsed;
137 fio_gettime(&tv, NULL);
141 nanosleep(&ts, NULL);
142 elapsed = utime_since_now(&tv);
/* Keep the maximum observed elapsed time as the sleep granularity. */
144 if (elapsed > ns_granularity)
145 ns_granularity = elapsed;
/* Record the current time as the run's genesis (start) timestamp. */
149 void set_genesis_time(void)
151 fio_gettime(&genesis, NULL);
/* Copy the genesis timestamp into *t, e.g. to seed a per-thread start time. */
154 void fill_start_time(struct timeval *t)
156 memcpy(t, &genesis, sizeof(genesis));