#include <unistd.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>

#include "fio.h"
#include "smalloc.h"

#include "hash.h"
#include "os/os.h"

#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned long cycles_per_usec;
#endif
int tsc_reliable = 0;

struct tv_valid {
	struct timeval last_tv;
	int last_tv_valid;
	unsigned long last_cycles;
};
static pthread_key_t tv_tls_key;

enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
enum fio_cs fio_clock_source_inited = CS_INVAL;

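/*
 * When built with FIO_DEBUG_TIME, fio keeps a hash table of fio_gettime()
 * call sites, keyed by caller return address, so per-caller call counts
 * can be dumped at exit (see fio_dump_gtod()).
 */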
#ifdef FIO_DEBUG_TIME

#define HASH_BITS	8
#define HASH_SIZE	(1 << HASH_BITS)

static struct flist_head hash[HASH_SIZE];
static int gtod_inited;

struct gtod_log {
	struct flist_head list;
	void *caller;
	unsigned long calls;
};

static struct gtod_log *find_hash(void *caller)
{
	unsigned long h = hash_ptr(caller, HASH_BITS);
	struct flist_head *entry;

	flist_for_each(entry, &hash[h]) {
		struct gtod_log *log = flist_entry(entry, struct gtod_log,
								list);

		if (log->caller == caller)
			return log;
	}

	return NULL;
}

static struct gtod_log *find_log(void *caller)
{
	struct gtod_log *log = find_hash(caller);

	if (!log) {
		unsigned long h;

		log = malloc(sizeof(*log));
		INIT_FLIST_HEAD(&log->list);
		log->caller = caller;
		log->calls = 0;

		h = hash_ptr(caller, HASH_BITS);
		flist_add_tail(&log->list, &hash[h]);
	}

	return log;
}

static void gtod_log_caller(void *caller)
{
	if (gtod_inited) {
		struct gtod_log *log = find_log(caller);

		log->calls++;
	}
}

static void fio_exit fio_dump_gtod(void)
{
	unsigned long total_calls = 0;
	int i;

	for (i = 0; i < HASH_SIZE; i++) {
		struct flist_head *entry;
		struct gtod_log *log;

		flist_for_each(entry, &hash[i]) {
			log = flist_entry(entry, struct gtod_log, list);

			printf("function %p, calls %lu\n", log->caller,
							log->calls);
			total_calls += log->calls;
		}
	}

	printf("Total %lu gettimeofday calls\n", total_calls);
}

static void fio_init gtod_init(void)
{
	int i;

	for (i = 0; i < HASH_SIZE; i++)
		INIT_FLIST_HEAD(&hash[i]);

	gtod_inited = 1;
}

#endif /* FIO_DEBUG_TIME */

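/*
 * Prefer CLOCK_MONOTONIC where available: it cannot be set and never
 * jumps backwards, unlike CLOCK_REALTIME.
 */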
static int fill_clock_gettime(struct timespec *ts)
{
#ifdef FIO_HAVE_CLOCK_MONOTONIC
	return clock_gettime(CLOCK_MONOTONIC, ts);
#else
	return clock_gettime(CLOCK_REALTIME, ts);
#endif
}

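/*
 * fio_gettime() is the central time query: it dispatches on the
 * configured clock source, then clamps the result against per-thread
 * state so callers never see time move backwards. If fio_tv is set
 * (declared elsewhere in fio; it is maintained by the gettimeofday
 * offload thread when clock queries are routed to a dedicated CPU),
 * that cached value is returned directly.
 */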
#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timeval *tp, void *caller)
#else
void fio_gettime(struct timeval *tp, void fio_unused *caller)
#endif
{
	struct tv_valid *tv;

#ifdef FIO_DEBUG_TIME
	if (!caller)
		caller = __builtin_return_address(0);

	gtod_log_caller(caller);
#endif

	if (fio_tv) {
		memcpy(tp, fio_tv, sizeof(*tp));
		return;
	}

	tv = pthread_getspecific(tv_tls_key);

	switch (fio_clock_source) {
	case CS_GTOD:
		gettimeofday(tp, NULL);
		break;
	case CS_CGETTIME: {
		struct timespec ts;

		if (fill_clock_gettime(&ts) < 0) {
			log_err("fio: clock_gettime fails\n");
			assert(0);
		}

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		break;
		}
#ifdef ARCH_HAVE_CPU_CLOCK
	case CS_CPUCLOCK: {
		unsigned long long usecs, t;

		t = get_cpu_clock();
		if (tv && t < tv->last_cycles) {
			dprint(FD_TIME, "CPU clock going back in time\n");
			t = tv->last_cycles;
		} else if (tv)
			tv->last_cycles = t;

		usecs = t / cycles_per_usec;
		tp->tv_sec = usecs / 1000000;
		tp->tv_usec = usecs % 1000000;
		break;
		}
#endif
	default:
		log_err("fio: invalid clock source %d\n", fio_clock_source);
		break;
	}

	/*
	 * If Linux is using the tsc clock on non-synced processors,
	 * sometimes time can appear to drift backwards. Fix that up.
	 */
	if (tv) {
		if (tv->last_tv_valid) {
			if (tp->tv_sec < tv->last_tv.tv_sec)
				tp->tv_sec = tv->last_tv.tv_sec;
			else if (tv->last_tv.tv_sec == tp->tv_sec &&
				 tp->tv_usec < tv->last_tv.tv_usec)
				tp->tv_usec = tv->last_tv.tv_usec;
		}
		tv->last_tv_valid = 1;
		memcpy(&tv->last_tv, tp, sizeof(*tp));
	}
}

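/*
 * Estimate the CPU clock rate: bracket a ~1280 usec wall-clock window
 * with two CPU clock reads, then divide the cycle delta by 128 (the
 * "+ 127" rounds up). Since 1280 / 128 == 10, the result is cycles per
 * usec scaled by 10, for one extra digit of precision;
 * calibrate_cpu_clock() divides the averaged value by 10 again.
 */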
#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned long get_cycles_per_usec(void)
{
	struct timeval s, e;
	unsigned long long c_s, c_e;
	struct timespec ts;

	fill_clock_gettime(&ts);
	s.tv_sec = ts.tv_sec;
	s.tv_usec = ts.tv_nsec / 1000;

	c_s = get_cpu_clock();
	do {
		unsigned long long elapsed;

		fill_clock_gettime(&ts);
		e.tv_sec = ts.tv_sec;
		e.tv_usec = ts.tv_nsec / 1000;

		elapsed = utime_since(&s, &e);
		if (elapsed >= 1280) {
			c_e = get_cpu_clock();
			break;
		}
	} while (1);

	return (c_e - c_s + 127) >> 7;
}

#define NR_TIME_ITERS	50

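/*
 * Calibrate by sampling get_cycles_per_usec() NR_TIME_ITERS times,
 * maintaining the running mean and variance with Welford's online
 * algorithm. Samples more than one standard deviation from the mean
 * (typically perturbed by preemption or migration) are discarded, and
 * the rest are averaged to produce cycles_per_usec.
 */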
static void calibrate_cpu_clock(void)
{
	double delta, mean, S;
	unsigned long avg, cycles[NR_TIME_ITERS];
	int i, samples;

	cycles[0] = get_cycles_per_usec();
	S = delta = mean = 0.0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		cycles[i] = get_cycles_per_usec();
		delta = cycles[i] - mean;
		if (delta) {
			mean += delta / (i + 1.0);
			S += delta * (cycles[i] - mean);
		}
	}

	S = sqrt(S / (NR_TIME_ITERS - 1.0));

	samples = avg = 0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		double this = cycles[i];

		if ((fmax(this, mean) - fmin(this, mean)) > S)
			continue;
		samples++;
		avg += this;
	}

	S /= (double) NR_TIME_ITERS;
	mean /= 10.0;

	for (i = 0; i < NR_TIME_ITERS; i++)
		dprint(FD_TIME, "cycles[%d]=%lu\n", i, cycles[i] / 10);

	avg /= samples;
	avg = (avg + 9) / 10;
	dprint(FD_TIME, "avg: %lu\n", avg);
	dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);

	cycles_per_usec = avg;
}
#else
static void calibrate_cpu_clock(void)
{
}
#endif

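/*
 * Allocate the per-thread tv_valid state backing the backwards-drift
 * guard in fio_gettime(). Each thread calls this once at startup.
 */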
void fio_local_clock_init(int is_thread)
{
	struct tv_valid *t;

	t = calloc(1, sizeof(*t));
	if (pthread_setspecific(tv_tls_key, t))
		log_err("fio: can't set TLS key\n");
}

static void kill_tv_tls_key(void *data)
{
	free(data);
}

void fio_clock_init(void)
{
	if (fio_clock_source == fio_clock_source_inited)
		return;

	if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
		log_err("fio: can't create TLS key\n");

	fio_clock_source_inited = fio_clock_source;
	calibrate_cpu_clock();

	/*
	 * If the arch sets tsc_reliable != 0, then it must be good enough
	 * to use as THE clock source. For x86 CPUs, this means the TSC
	 * runs at a constant rate and is synced across CPU cores.
	 */
	if (tsc_reliable) {
		if (!fio_clock_source_set)
			fio_clock_source = CS_CPUCLOCK;
	} else if (fio_clock_source == CS_CPUCLOCK)
		log_info("fio: clocksource=cpu may not be reliable\n");
}

unsigned long long utime_since(struct timeval *s, struct timeval *e)
{
	long sec, usec;
	unsigned long long ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	ret = sec * 1000000ULL + usec;

	return ret;
}

unsigned long long utime_since_now(struct timeval *s)
{
	struct timeval t;

	fio_gettime(&t, NULL);
	return utime_since(s, &t);
}

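/*
 * Typical usage sketch for timing a region of code; do_work() is a
 * placeholder, not a fio function:
 *
 *	struct timeval start;
 *	unsigned long long elapsed;
 *
 *	fio_gettime(&start, NULL);
 *	do_work();
 *	elapsed = utime_since_now(&start);
 */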
unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
	long sec, usec, ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	sec *= 1000UL;
	usec /= 1000UL;
	ret = sec + usec;

	return ret;
}

unsigned long mtime_since_now(struct timeval *s)
{
	struct timeval t;
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
	return mtime_since(s, &t);
}

unsigned long time_since_now(struct timeval *s)
{
	return mtime_since_now(s) / 1000;
}

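/*
 * Monotonic CPU clock self-test: spawn one thread per online CPU, each
 * pinned to its CPU, all logging (sequence, tsc) pairs stamped from a
 * single shared atomic counter. If the CPU clocks are synced across
 * cores, sorting the merged log by sequence number must also yield
 * non-decreasing tsc values; any inversion is a failure.
 */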
#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK)

#define CLOCK_ENTRIES	100000

struct clock_entry {
	unsigned long seq;
	unsigned long tsc;
	unsigned long cpu;
};

struct clock_thread {
	pthread_t thread;
	int cpu;
	pthread_mutex_t lock;
	pthread_mutex_t started;
	uint64_t *seq;
	struct clock_entry *entries;
};

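/*
 * Atomically bump the shared sequence counter and return the
 * post-increment value, via the gcc __sync builtin.
 */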
static inline uint64_t atomic64_inc_return(uint64_t *seq)
{
	return 1 + __sync_fetch_and_add(seq, 1);
}

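/*
 * Per-CPU sampler thread. After pinning itself and signalling readiness
 * via the started mutex, it blocks on its lock mutex until all threads
 * are released at once. Each iteration retries the (sequence, tsc) pair
 * if another thread advanced the counter between the increment and the
 * clock read, so global sequence order matches tsc capture order.
 */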
static void *clock_thread_fn(void *data)
{
	struct clock_thread *t = data;
	struct clock_entry *c;
	os_cpu_mask_t cpu_mask;
	int i;

	memset(&cpu_mask, 0, sizeof(cpu_mask));
	fio_cpu_set(&cpu_mask, t->cpu);

	if (fio_setaffinity(gettid(), cpu_mask) == -1) {
		log_err("clock setaffinity failed\n");
		return (void *) 1;
	}

	pthread_mutex_lock(&t->lock);
	pthread_mutex_unlock(&t->started);

	c = &t->entries[0];
	for (i = 0; i < CLOCK_ENTRIES; i++, c++) {
		uint64_t seq, tsc;

		c->cpu = t->cpu;
		do {
			seq = atomic64_inc_return(t->seq);
			tsc = get_cpu_clock();
		} while (seq != *t->seq);

		c->seq = seq;
		c->tsc = tsc;
	}

	log_info("cs: cpu%3d: %lu clocks seen\n", t->cpu,
			t->entries[CLOCK_ENTRIES - 1].tsc - t->entries[0].tsc);
	return NULL;
}

static int clock_cmp(const void *p1, const void *p2)
{
	const struct clock_entry *c1 = p1;
	const struct clock_entry *c2 = p2;

	if (c1->seq == c2->seq)
		log_err("cs: bug in atomic sequence!\n");

	return (c1->seq > c2->seq) - (c1->seq < c2->seq);
}

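/*
 * Run the cross-CPU clock test: start the samplers (created locked,
 * released together once all have signalled readiness), join them,
 * sort the merged entries by sequence, and scan for tsc inversions.
 * Returns non-zero if any thread or comparison failed.
 */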
int fio_monotonic_clocktest(void)
{
	struct clock_thread *threads;
	unsigned int nr_cpus = cpus_online();
	struct clock_entry *entries, *prev;
	unsigned long tentries, failed;
	uint64_t seq = 0;
	unsigned int i;

	fio_debug |= 1U << FD_TIME;
	calibrate_cpu_clock();
	fio_debug &= ~(1U << FD_TIME);

	threads = malloc(nr_cpus * sizeof(struct clock_thread));
	tentries = CLOCK_ENTRIES * nr_cpus;
	entries = malloc(tentries * sizeof(struct clock_entry));

	log_info("cs: Testing %u CPUs\n", nr_cpus);

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];

		t->cpu = i;
		t->seq = &seq;
		t->entries = &entries[i * CLOCK_ENTRIES];
		pthread_mutex_init(&t->lock, NULL);
		pthread_mutex_init(&t->started, NULL);
		pthread_mutex_lock(&t->lock);
		pthread_create(&t->thread, NULL, clock_thread_fn, t);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];

		pthread_mutex_lock(&t->started);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];

		pthread_mutex_unlock(&t->lock);
	}

	for (failed = i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];
		void *ret;

		pthread_join(t->thread, &ret);
		if (ret)
			failed++;
	}
	free(threads);

	if (failed) {
515 log_err("Clocksource test: %u threads failed\n", failed);
	qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);

	prev = NULL;
	for (failed = i = 0; i < tentries; i++) {
		struct clock_entry *this = &entries[i];

		if (!i) {
			prev = this;
			continue;
		}

		if (prev->tsc > this->tsc) {
			unsigned long diff = prev->tsc - this->tsc;

			log_info("cs: CPU clock mismatch (diff=%lu):\n", diff);
			log_info("\t CPU%3lu: TSC=%lu, SEQ=%lu\n",
					prev->cpu, prev->tsc, prev->seq);
			log_info("\t CPU%3lu: TSC=%lu, SEQ=%lu\n",
					this->cpu, this->tsc, this->seq);
			failed++;
		}

		prev = this;
	}

542 log_info("cs: Failed: %lu\n", failed);
544 log_info("cs: Pass!\n");
#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) */

int fio_monotonic_clocktest(void)
{
	log_info("cs: current platform does not support CPU clocks\n");
	return 0;
}

#endif