/* Wall-clock reference for the whole run; set once by set_genesis_time() */
static struct timeval genesis;
/* Worst observed nanosleep() overshoot in usec, measured by fio_time_init();
 * sleeps shorter than this are busy-looped instead (see usec_sleep()) */
static unsigned long ns_granularity;
10 * busy looping version for the last few usec
12 void usec_spin(unsigned int usec)
16 fio_gettime(&start, NULL);
17 while (utime_since_now(&start) < usec)
21 void usec_sleep(struct thread_data *td, unsigned long usec)
27 unsigned long ts = usec;
29 if (usec < ns_granularity) {
34 ts = usec - ns_granularity;
37 req.tv_sec = ts / 1000000;
38 ts -= 1000000 * req.tv_sec;
42 req.tv_nsec = ts * 1000;
43 fio_gettime(&tv, NULL);
45 if (nanosleep(&req, NULL) < 0)
48 ts = utime_since_now(&tv);
53 } while (!td->terminate);
56 uint64_t mtime_since_genesis(void)
58 return mtime_since_now(&genesis);
61 uint64_t utime_since_genesis(void)
63 return utime_since_now(&genesis);
66 int in_ramp_time(struct thread_data *td)
68 return td->o.ramp_time && !td->ramp_time_over;
71 int ramp_time_over(struct thread_data *td)
75 if (!td->o.ramp_time || td->ramp_time_over)
78 fio_gettime(&tv, NULL);
79 if (utime_since(&td->epoch, &tv) >= td->o.ramp_time) {
80 td->ramp_time_over = 1;
82 td_set_runstate(td, TD_RAMP);
89 void fio_time_init(void)
96 * Check the granularity of the nanosleep function
98 for (i = 0; i < 10; i++) {
101 unsigned long elapsed;
103 fio_gettime(&tv, NULL);
107 nanosleep(&ts, NULL);
108 elapsed = utime_since_now(&tv);
110 if (elapsed > ns_granularity)
111 ns_granularity = elapsed;
115 void set_genesis_time(void)
117 fio_gettime(&genesis, NULL);
120 void fill_start_time(struct timeval *t)
122 memcpy(t, &genesis, sizeof(genesis));