/*
 * time.c - clock delta and sleep helpers for fio
 */
#include <time.h>
#include <sys/time.h>

#include "fio.h"
6static struct timeval genesis;
7static unsigned long ns_granularity;
8
9unsigned long long utime_since(struct timeval *s, struct timeval *e)
10{
11 long sec, usec;
12 unsigned long long ret;
13
14 sec = e->tv_sec - s->tv_sec;
15 usec = e->tv_usec - s->tv_usec;
16 if (sec > 0 && usec < 0) {
17 sec--;
18 usec += 1000000;
19 }
20
21 /*
22 * time warp bug on some kernels?
23 */
24 if (sec < 0 || (sec == 0 && usec < 0))
25 return 0;
26
27 ret = sec * 1000000ULL + usec;
28
29 return ret;
30}
31
32unsigned long long utime_since_now(struct timeval *s)
33{
34 struct timeval t;
35
36 fio_gettime(&t, NULL);
37 return utime_since(s, &t);
38}
39
40unsigned long mtime_since(struct timeval *s, struct timeval *e)
41{
42 long sec, usec, ret;
43
44 sec = e->tv_sec - s->tv_sec;
45 usec = e->tv_usec - s->tv_usec;
46 if (sec > 0 && usec < 0) {
47 sec--;
48 usec += 1000000;
49 }
50
51 if (sec < 0 || (sec == 0 && usec < 0))
52 return 0;
53
54 sec *= 1000UL;
55 usec /= 1000UL;
56 ret = sec + usec;
57
58 return ret;
59}
60
61unsigned long mtime_since_now(struct timeval *s)
62{
63 struct timeval t;
64 void *p = __builtin_return_address(0);
65
66 fio_gettime(&t, p);
67 return mtime_since(s, &t);
68}
69
70unsigned long time_since_now(struct timeval *s)
71{
72 return mtime_since_now(s) / 1000;
73}
74
75/*
76 * busy looping version for the last few usec
77 */
78void usec_spin(unsigned int usec)
79{
80 struct timeval start;
81
82 fio_gettime(&start, NULL);
83 while (utime_since_now(&start) < usec)
84 nop;
85}
86
87void usec_sleep(struct thread_data *td, unsigned long usec)
88{
89 struct timespec req;
90 struct timeval tv;
91
92 do {
93 unsigned long ts = usec;
94
95 if (usec < ns_granularity) {
96 usec_spin(usec);
97 break;
98 }
99
100 ts = usec - ns_granularity;
101
102 if (ts >= 1000000) {
103 req.tv_sec = ts / 1000000;
104 ts -= 1000000 * req.tv_sec;
105 } else
106 req.tv_sec = 0;
107
108 req.tv_nsec = ts * 1000;
109 fio_gettime(&tv, NULL);
110
111 if (nanosleep(&req, NULL) < 0)
112 break;
113
114 ts = utime_since_now(&tv);
115 if (ts >= usec)
116 break;
117
118 usec -= ts;
119 } while (!td->terminate);
120}
121
122unsigned long mtime_since_genesis(void)
123{
124 return mtime_since_now(&genesis);
125}
126
127int in_ramp_time(struct thread_data *td)
128{
129 return td->o.ramp_time && !td->ramp_time_over;
130}
131
132int ramp_time_over(struct thread_data *td)
133{
134 struct timeval tv;
135
136 if (!td->o.ramp_time || td->ramp_time_over)
137 return 1;
138
139 fio_gettime(&tv, NULL);
140 if (mtime_since(&td->epoch, &tv) >= td->o.ramp_time * 1000) {
141 td->ramp_time_over = 1;
142 reset_all_stats(td);
143 td_set_runstate(td, TD_RAMP);
144 return 1;
145 }
146
147 return 0;
148}
149
150void fio_time_init(void)
151{
152 int i;
153
154 fio_clock_init();
155
156 /*
157 * Check the granularity of the nanosleep function
158 */
159 for (i = 0; i < 10; i++) {
160 struct timeval tv;
161 struct timespec ts;
162 unsigned long elapsed;
163
164 fio_gettime(&tv, NULL);
165 ts.tv_sec = 0;
166 ts.tv_nsec = 1000;
167
168 nanosleep(&ts, NULL);
169 elapsed = utime_since_now(&tv);
170
171 if (elapsed > ns_granularity)
172 ns_granularity = elapsed;
173 }
174}
175
176void set_genesis_time(void)
177{
178 fio_gettime(&genesis, NULL);
179}
180
181void fill_start_time(struct timeval *t)
182{
183 memcpy(t, &genesis, sizeof(genesis));
184}