/* Wall-clock time of run start; written by set_genesis_time(). */
static struct timeval genesis;
/* Worst observed nanosleep() overshoot in usec, measured once by
 * fio_time_init(); sleeps shorter than this are busy-looped instead. */
static unsigned long ns_granularity;
10 * busy looping version for the last few usec
12 void usec_spin(unsigned int usec)
16 fio_gettime(&start, NULL);
17 while (utime_since_now(&start) < usec)
21 void usec_sleep(struct thread_data *td, unsigned long usec)
27 unsigned long ts = usec;
29 if (usec < ns_granularity) {
34 ts = usec - ns_granularity;
37 req.tv_sec = ts / 1000000;
38 ts -= 1000000 * req.tv_sec;
42 req.tv_nsec = ts * 1000;
43 fio_gettime(&tv, NULL);
45 if (nanosleep(&req, NULL) < 0)
48 ts = utime_since_now(&tv);
53 } while (!td->terminate);
56 uint64_t mtime_since_genesis(void)
58 return mtime_since_now(&genesis);
61 int in_ramp_time(struct thread_data *td)
63 return td->o.ramp_time && !td->ramp_time_over;
66 int ramp_time_over(struct thread_data *td)
70 if (!td->o.ramp_time || td->ramp_time_over)
73 fio_gettime(&tv, NULL);
74 if (mtime_since(&td->epoch, &tv) >= td->o.ramp_time * 1000) {
75 td->ramp_time_over = 1;
77 td_set_runstate(td, TD_RAMP);
84 void fio_time_init(void)
91 * Check the granularity of the nanosleep function
93 for (i = 0; i < 10; i++) {
96 unsigned long elapsed;
98 fio_gettime(&tv, NULL);
102 nanosleep(&ts, NULL);
103 elapsed = utime_since_now(&tv);
105 if (elapsed > ns_granularity)
106 ns_granularity = elapsed;
110 void set_genesis_time(void)
112 fio_gettime(&genesis, NULL);
115 void fill_start_time(struct timeval *t)
117 memcpy(t, &genesis, sizeof(genesis));