/*
 * Wall-clock reference taken at job start (see set_genesis_time());
 * baseline for mtime_since_genesis() and fill_start_time().
 */
static struct timeval genesis;

/*
 * Worst observed nanosleep() overshoot in usec, measured once at init
 * (see time_init()). usec_sleep() busy-spins below this threshold.
 */
static unsigned long ns_granularity;
/*
 * Return elapsed time from @s to @e in microseconds.
 *
 * Returns 0 if @e appears to be earlier than @s, which can happen due
 * to a time warp bug on some kernels.
 */
unsigned long long utime_since(struct timeval *s, struct timeval *e)
{
	long sec, usec;
	unsigned long long ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		/* borrow one second into the usec field */
		sec--;
		usec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	ret = sec * 1000000ULL + usec;

	return ret;
}
32 unsigned long long utime_since_now(struct timeval *s)
36 fio_gettime(&t, NULL);
37 return utime_since(s, &t);
/*
 * Return elapsed time from @s to @e in milliseconds.
 *
 * Returns 0 if @e appears to be earlier than @s (time warp bug on some
 * kernels), or if the difference is below one millisecond.
 */
unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
	long sec, usec, ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		/* borrow one second into the usec field */
		sec--;
		usec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	sec *= 1000;
	usec /= 1000;
	ret = sec + usec;

	return ret;
}
/*
 * Return elapsed milliseconds from @s to the current time.
 *
 * The caller's return address is passed to fio_gettime() —
 * presumably as a caching/identification hint; confirm against
 * the fio_gettime() implementation.
 */
unsigned long mtime_since_now(struct timeval *s)
{
	struct timeval t;
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
	return mtime_since(s, &t);
}
/*
 * Return elapsed time from @s to the current time in whole seconds.
 */
unsigned long time_since_now(struct timeval *s)
{
	return mtime_since_now(s) / 1000;
}
79 * busy looping version for the last few usec
81 void usec_spin(unsigned int usec)
85 fio_gettime(&start, NULL);
86 while (utime_since_now(&start) < usec)
90 void usec_sleep(struct thread_data *td, unsigned long usec)
96 unsigned long ts = usec;
98 if (usec < ns_granularity) {
103 ts = usec - ns_granularity;
106 req.tv_sec = ts / 1000000;
107 ts -= 1000000 * req.tv_sec;
111 req.tv_nsec = ts * 1000;
112 fio_gettime(&tv, NULL);
114 if (nanosleep(&req, NULL) < 0)
117 ts = utime_since_now(&tv);
122 } while (!td->terminate);
125 long rate_throttle(struct thread_data *td, unsigned long time_spent,
126 unsigned long bytes, enum fio_ddir ddir)
128 unsigned int bs = td->o.min_bs[ddir];
129 unsigned long usec_cycle;
131 if (!td->o.rate[ddir] && !td->o.rate_iops[ddir])
134 usec_cycle = td->rate_usec_cycle[ddir] * (bytes / bs);
136 if (time_spent < usec_cycle) {
137 unsigned long s = usec_cycle - time_spent;
139 td->rate_pending_usleep[ddir] += s;
141 long overtime = time_spent - usec_cycle;
143 td->rate_pending_usleep[ddir] -= overtime;
146 return td->rate_pending_usleep[ddir];
149 unsigned long mtime_since_genesis(void)
151 return mtime_since_now(&genesis);
154 int in_ramp_time(struct thread_data *td)
156 return td->o.ramp_time && !td->ramp_time_over;
159 int ramp_time_over(struct thread_data *td)
163 if (!td->o.ramp_time || td->ramp_time_over)
166 fio_gettime(&tv, NULL);
167 if (mtime_since(&td->epoch, &tv) >= td->o.ramp_time * 1000) {
168 td->ramp_time_over = 1;
170 td_set_runstate(td, TD_RAMP);
177 static void fio_init time_init(void)
182 * Check the granularity of the nanosleep function
184 for (i = 0; i < 10; i++) {
187 unsigned long elapsed;
189 fio_gettime(&tv, NULL);
193 nanosleep(&ts, NULL);
194 elapsed = utime_since_now(&tv);
196 if (elapsed > ns_granularity)
197 ns_granularity = elapsed;
201 void set_genesis_time(void)
203 fio_gettime(&genesis, NULL);
206 void fill_start_time(struct timeval *t)
208 memcpy(t, &genesis, sizeof(genesis));