/* Timestamp taken at job-set creation; baseline for the *_since_genesis() helpers. */
6 static struct timespec genesis;
/* Measured overhead/granularity of nanosleep(); despite the "ns_" name it is
 * compared against microsecond values in usec_sleep(), and is set from
 * utime_since_now() in fio_time_init(). Sleeps at or below this are not
 * delegated to nanosleep(). */
7 static unsigned long ns_granularity;
/*
 * Advance *ts by 'msec' milliseconds, keeping tv_nsec normalized.
 * (Some lines of this body are elided in this view.)
 */
9 void timespec_add_msec(struct timespec *ts, unsigned int msec)
/* 64-bit product so a large msec cannot overflow during the *1e6 conversion */
11 uint64_t adj_nsec = 1000000ULL * msec;
13 ts->tv_nsec += adj_nsec;
/* Fold whole seconds out of the added nanoseconds first */
14 if (adj_nsec >= 1000000000) {
15 uint64_t adj_sec = adj_nsec / 1000000000;
17 ts->tv_nsec -= adj_sec * 1000000000;
18 ts->tv_sec += adj_sec;
/* After the fold, at most one extra carry into tv_sec can remain
 * (the carry increment itself is on an elided line). */
20 if (ts->tv_nsec >= 1000000000){
21 ts->tv_nsec -= 1000000000;
27 * busy looping version for the last few usec
/*
 * Busy-wait until at least 'usec' microseconds have passed.
 * Returns the actual elapsed time in usec as measured by utime_since_now()
 * (return statement is on an elided line).
 */
29 uint64_t usec_spin(unsigned int usec)
31 struct timespec start;
34 fio_gettime(&start, NULL);
/* Spin, re-sampling the clock each iteration, until the target is reached */
35 while ((t = utime_since_now(&start)) < usec)
42 * busy loop for a fixed amount of cycles
/*
 * Burn a fixed number ('n') of loop iterations; body elided in this view.
 * Presumably used when a cycle-count (rather than a wall-clock) spin is
 * wanted — confirm against the full source.
 */
44 void cycles_spin(unsigned int n)
/*
 * Sleep for 'usec' microseconds, honoring job termination.
 * Short sleeps (below the measured nanosleep granularity) are handled on an
 * elided path; longer ones subtract the granularity and use nanosleep() in
 * chunks, re-checking td->terminate between chunks. Returns total elapsed
 * usec (return statement elided in this view).
 */
52 uint64_t usec_sleep(struct thread_data *td, unsigned long usec)
59 unsigned long ts = usec;
/* Too short for nanosleep() to be accurate — elided branch handles it */
61 if (usec < ns_granularity) {
/* Account for nanosleep()'s own overhead so we don't oversleep */
66 ts = usec - ns_granularity;
69 req.tv_sec = ts / 1000000;
70 ts -= 1000000 * req.tv_sec;
72 * Limit sleep to ~1 second at most, otherwise we
73 * don't notice then someone signaled the job to
/* Remaining sub-second part, converted usec -> nsec */
81 req.tv_nsec = ts * 1000;
82 fio_gettime(&tv, NULL);
/* Interrupted or failed sleep — elided error path follows */
84 if (nanosleep(&req, NULL) < 0)
/* Measure how long we actually slept this iteration */
87 ts = utime_since_now(&tv);
/* Keep sleeping in bounded chunks until done or the job is told to stop */
93 } while (!td->terminate);
/* Seconds-scale elapsed time since the genesis timestamp. */
98 uint64_t time_since_genesis(void)
100 return time_since_now(&genesis);
/* Milliseconds elapsed since the genesis timestamp. */
103 uint64_t mtime_since_genesis(void)
105 return mtime_since_now(&genesis);
/* Microseconds elapsed since the genesis timestamp. */
108 uint64_t utime_since_genesis(void)
110 return utime_since_now(&genesis);
/* True while a configured ramp period is still running for this job. */
113 bool in_ramp_time(struct thread_data *td)
115 return td->o.ramp_time && !td->ramp_time_over;
/*
 * Propagate ramp completion to the parent job, if any: reset the parent's
 * accumulated stats and move it to the RAMP state. Return value semantics
 * (return statements are elided) appear to tell the caller whether a parent
 * transition happened — confirm against the full source.
 */
118 static bool parent_update_ramp(struct thread_data *td)
120 struct thread_data *parent = td->parent;
/* No parent, or parent already finished its ramp — nothing to do */
122 if (!parent || parent->ramp_time_over)
/* Discard stats gathered during the ramp so they don't skew results */
125 reset_all_stats(parent);
126 parent->ramp_time_over = true;
127 td_set_runstate(parent, TD_RAMP);
/*
 * Check whether this job's ramp period has elapsed, latching the result in
 * td->ramp_time_over. Compares microseconds since the job epoch against the
 * configured ramp_time. Return statements are elided in this view.
 */
131 bool ramp_time_over(struct thread_data *td)
/* No ramp configured, or already latched as finished */
133 if (!td->o.ramp_time || td->ramp_time_over)
136 if (utime_since_now(&td->epoch) >= td->o.ramp_time) {
137 td->ramp_time_over = true;
140 td_set_runstate(td, TD_RAMP);
143 * If we have a parent, the parent isn't doing IO. Hence
144 * the parent never enters do_io(), which will switch us
145 * from RAMP -> RUNNING. Do this manually here.
147 if (parent_update_ramp(td))
148 td_set_runstate(td, TD_RUNNING);
/*
 * One-time timing setup: probe how coarse/costly nanosleep() really is by
 * timing 10 short sleeps (the requested duration setup is elided) and
 * recording the worst-case elapsed time, in usec, in ns_granularity.
 * usec_sleep() uses that value to decide when nanosleep() is too inaccurate.
 */
156 void fio_time_init(void)
163 * Check the granularity of the nanosleep function
165 for (i = 0; i < 10; i++) {
166 struct timespec tv, ts;
167 unsigned long elapsed;
169 fio_gettime(&tv, NULL);
173 nanosleep(&ts, NULL);
174 elapsed = utime_since_now(&tv);
/* Keep the maximum observed overhead across all probes */
176 if (elapsed > ns_granularity)
177 ns_granularity = elapsed;
/* Record "now" as the genesis baseline for the *_since_genesis() helpers. */
181 void set_genesis_time(void)
183 fio_gettime(&genesis, NULL);
/*
 * Record the job's start times:
 *  - td->epoch via fio_gettime() (fio's internal clock),
 *  - td->alternate_epoch in milliseconds from the given alternate clock,
 *  - td->job_start in milliseconds from job_start_clock_id, reusing
 *    alternate_epoch when both clock ids match to avoid a second syscall.
 */
186 void set_epoch_time(struct thread_data *td, clockid_t log_alternate_epoch_clock_id, clockid_t job_start_clock_id)
189 fio_gettime(&td->epoch, NULL);
190 clock_gettime(log_alternate_epoch_clock_id, &ts);
/* Convert timespec to whole milliseconds */
191 td->alternate_epoch = (unsigned long long)(ts.tv_sec) * 1000 +
192 (unsigned long long)(ts.tv_nsec) / 1000000;
/* Same clock — no need to sample it twice */
193 if (job_start_clock_id == log_alternate_epoch_clock_id)
195 td->job_start = td->alternate_epoch;
199 clock_gettime(job_start_clock_id, &ts);
200 td->job_start = (unsigned long long)(ts.tv_sec) * 1000 +
201 (unsigned long long)(ts.tv_nsec) / 1000000;
/* Copy the genesis timestamp into *t (caller-provided start-time slot). */
205 void fill_start_time(struct timespec *t)
207 memcpy(t, &genesis, sizeof(genesis));