#if defined(ARCH_HAVE_CPU_CLOCK) && !defined(ARCH_CPU_CLOCK_CYCLES_PER_USEC)
static unsigned long cycles_per_usec;
static unsigned long inv_cycles_per_usec;
static uint64_t max_cycles_for_mult;
#endif

int tsc_reliable = 0;

struct tv_valid {
	uint64_t last_cycles;
	int last_tv_valid;
	int warned;
};

#ifdef CONFIG_TLS_THREAD
static __thread struct tv_valid static_tv_valid;
#else
static pthread_key_t tv_tls_key;
#endif

enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
static enum fio_cs fio_clock_source_inited = CS_INVAL;

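/*
 * A note on the sources (summarizing the cases handled in
 * __fio_gettime() below): CS_GTOD uses gettimeofday(), CS_CGETTIME
 * uses clock_gettime(), and CS_CPUCLOCK reads the raw CPU cycle
 * counter and converts with the calibrated cycles_per_usec. CS_INVAL
 * simply marks fio_clock_source_inited as "not yet initialized".
 */
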
#ifdef FIO_DEBUG_TIME

#define HASH_BITS	8
#define HASH_SIZE	(1 << HASH_BITS)

static struct flist_head hash[HASH_SIZE];
static int gtod_inited;

struct gtod_log {
	struct flist_head list;
	void *caller;
	unsigned long calls;
};

static struct gtod_log *find_hash(void *caller)
{
	unsigned long h = hash_ptr(caller, HASH_BITS);
	struct flist_head *entry;

	flist_for_each(entry, &hash[h]) {
		struct gtod_log *log = flist_entry(entry, struct gtod_log,
									list);

		if (log->caller == caller)
			return log;
	}

	return NULL;
}

static void inc_caller(void *caller)
{
	struct gtod_log *log = find_hash(caller);

	if (!log) {
		unsigned long h;

		log = malloc(sizeof(*log));
		INIT_FLIST_HEAD(&log->list);
		log->caller = caller;
		log->calls = 0;

		h = hash_ptr(caller, HASH_BITS);
		flist_add_tail(&log->list, &hash[h]);
	}

	log->calls++;
}

static void gtod_log_caller(void *caller)
{
	if (gtod_inited)
		inc_caller(caller);
}

static void fio_exit fio_dump_gtod(void)
{
	unsigned long total_calls = 0;
	int i;

	for (i = 0; i < HASH_SIZE; i++) {
		struct flist_head *entry;
		struct gtod_log *log;

		flist_for_each(entry, &hash[i]) {
			log = flist_entry(entry, struct gtod_log, list);

			printf("function %p, calls %lu\n", log->caller,
								log->calls);
			total_calls += log->calls;
		}
	}

	printf("Total %lu gettimeofday\n", total_calls);
}

static void fio_init gtod_init(void)
{
	int i;

	for (i = 0; i < HASH_SIZE; i++)
		INIT_FLIST_HEAD(&hash[i]);

	gtod_inited = 1;
}

#endif /* FIO_DEBUG_TIME */

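/*
 * When built with FIO_DEBUG_TIME, each fio_gettime() call site is
 * counted in the hash above and dumped at exit via the printf formats
 * in fio_dump_gtod(), along the lines of (illustrative values only):
 *
 *	function 0x40f2a0, calls 152340
 *	Total 152340 gettimeofday
 */
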
#ifdef CONFIG_CLOCK_GETTIME
static int fill_clock_gettime(struct timespec *ts)
{
#ifdef CONFIG_CLOCK_MONOTONIC
	return clock_gettime(CLOCK_MONOTONIC, ts);
#else
	return clock_gettime(CLOCK_REALTIME, ts);
#endif
}
#endif

static void __fio_gettime(struct timeval *tp)
{
	struct tv_valid *tv;

#ifdef CONFIG_TLS_THREAD
	tv = &static_tv_valid;
#else
	tv = pthread_getspecific(tv_tls_key);
#endif

	switch (fio_clock_source) {
#ifdef CONFIG_GETTIMEOFDAY
	case CS_GTOD:
		gettimeofday(tp, NULL);
		break;
#endif
#ifdef CONFIG_CLOCK_GETTIME
	case CS_CGETTIME: {
		struct timespec ts;

		if (fill_clock_gettime(&ts) < 0) {
			log_err("fio: clock_gettime fails\n");
			assert(0);
		}

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		break;
		}
#endif
#ifdef ARCH_HAVE_CPU_CLOCK
	case CS_CPUCLOCK: {
		uint64_t usecs, t;

		t = get_cpu_clock();
		if (t < tv->last_cycles && tv->last_tv_valid &&
		    !tv->warned) {
			log_err("fio: CPU clock going back in time\n");
			tv->warned = 1;
		}

		tv->last_cycles = t;
		tv->last_tv_valid = 1;
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
		usecs = t / ARCH_CPU_CLOCK_CYCLES_PER_USEC;
#else
		/*
		 * Fast path: scale cycles to usecs with a fixed-point
		 * multiply while the product cannot overflow 64 bits,
		 * fall back to a full divide otherwise.
		 */
		if (t < max_cycles_for_mult)
			usecs = (t * inv_cycles_per_usec) / 16777216UL;
		else
			usecs = t / cycles_per_usec;
#endif
		tp->tv_sec = usecs / 1000000;
		tp->tv_usec = usecs % 1000000;
		break;
		}
#endif
	default:
		log_err("fio: invalid clock source %d\n", fio_clock_source);
		break;
	}
}

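/*
 * Worked example of the fixed-point path above (illustrative numbers,
 * not from a real calibration): a 3 GHz TSC gives cycles_per_usec =
 * 3000 and inv_cycles_per_usec = 16777216 / 3000 = 5592, i.e. about
 * 2^24 / cycles_per_usec. For t cycles,
 *
 *	usecs = (t * 5592) / 16777216 ~= t / 3000.2
 *
 * replacing a divide by an arbitrary constant with a multiply plus a
 * divide by a power of two, at the cost of a tiny rounding error.
 * max_cycles_for_mult caps t so that t * inv_cycles_per_usec cannot
 * overflow 64 bits.
 */
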
#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timeval *tp, void *caller)
#else
void fio_gettime(struct timeval *tp, void fio_unused *caller)
#endif
{
#ifdef FIO_DEBUG_TIME
	if (!caller)
		caller = __builtin_return_address(0);

	gtod_log_caller(caller);
#endif
	if (fio_unlikely(fio_gettime_offload(tp)))
		return;

	__fio_gettime(tp);
}

#if defined(ARCH_HAVE_CPU_CLOCK) && !defined(ARCH_CPU_CLOCK_CYCLES_PER_USEC)
static unsigned long get_cycles_per_usec(void)
{
	struct timeval s, e;
	uint64_t c_s, c_e;
	enum fio_cs old_cs = fio_clock_source;

	/* Time the CPU clock against a wall-clock source */
#ifdef CONFIG_CLOCK_GETTIME
	fio_clock_source = CS_CGETTIME;
#else
	fio_clock_source = CS_GTOD;
#endif
	__fio_gettime(&s);

	c_s = get_cpu_clock();
	do {
		uint64_t elapsed;

		__fio_gettime(&e);

		elapsed = utime_since(&s, &e);
		if (elapsed >= 1280) {
			c_e = get_cpu_clock();
			break;
		}
	} while (1);

	fio_clock_source = old_cs;
	return (c_e - c_s + 127) >> 7;
}

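/*
 * Note on units, inferred from the constants above: the sample window
 * is at least 1280 usec and the cycle delta is divided by 128 (with
 * rounding via +127), so the returned value is cycles per ~10 usec.
 * That is why the calibration below divides its averages by 10 to get
 * back to cycles per usec.
 */
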
#define NR_TIME_ITERS	50

static int calibrate_cpu_clock(void)
{
	double delta, mean, S;
	uint64_t avg, cycles[NR_TIME_ITERS];
	int i, samples;

	/* Warm-up call; the loop below overwrites cycles[0] */
	cycles[0] = get_cycles_per_usec();
	S = delta = mean = 0.0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		cycles[i] = get_cycles_per_usec();
		delta = cycles[i] - mean;
		if (delta) {
			mean += delta / (i + 1.0);
			S += delta * (cycles[i] - mean);
		}
	}

	/*
	 * The most common platform clock breakage is returning zero
	 * indefinitely. Check for that and return failure.
	 */
	if (!cycles[0] && !cycles[NR_TIME_ITERS - 1])
		return 1;

	S = sqrt(S / (NR_TIME_ITERS - 1.0));

	samples = avg = 0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		double this = cycles[i];

		/* Discard samples more than one deviation from the mean */
		if ((fmax(this, mean) - fmin(this, mean)) > S)
			continue;
		samples++;
		avg += this;
	}

	S /= (double) NR_TIME_ITERS;

	for (i = 0; i < NR_TIME_ITERS; i++)
		dprint(FD_TIME, "cycles[%d]=%llu\n", i,
					(unsigned long long) cycles[i] / 10);

	avg /= samples;
	avg = (avg + 5) / 10;
	dprint(FD_TIME, "avg: %llu\n", (unsigned long long) avg);
	dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);

	cycles_per_usec = avg;
	inv_cycles_per_usec = 16777216UL / cycles_per_usec;
	max_cycles_for_mult = ~0ULL / inv_cycles_per_usec;
	dprint(FD_TIME, "inv_cycles_per_usec=%lu\n", inv_cycles_per_usec);
	return 0;
}

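/*
 * The sampling loop in calibrate_cpu_clock() is Welford's online
 * algorithm: "mean" tracks the running average and S accumulates the
 * sum of squared deviations, so sqrt(S / (n - 1)) is the sample
 * standard deviation. A minimal standalone sketch of the same update
 * step, for reference:
 *
 *	double mean = 0.0, S = 0.0;
 *
 *	for (i = 0; i < n; i++) {
 *		double delta = x[i] - mean;
 *
 *		mean += delta / (i + 1.0);
 *		S += delta * (x[i] - mean);
 *	}
 */
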
#else
static int calibrate_cpu_clock(void)
{
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
	return 0;
#else
	return 1;
#endif
}
#endif /* ARCH_HAVE_CPU_CLOCK */

#ifndef CONFIG_TLS_THREAD
void fio_local_clock_init(int is_thread)
{
	struct tv_valid *t;

	t = calloc(1, sizeof(*t));
	if (pthread_setspecific(tv_tls_key, t)) {
		log_err("fio: can't set TLS key\n");
		assert(0);
	}
}

static void kill_tv_tls_key(void *data)
{
	free(data);
}
#else
void fio_local_clock_init(int is_thread)
{
}
#endif

void fio_clock_init(void)
{
	if (fio_clock_source == fio_clock_source_inited)
		return;

#ifndef CONFIG_TLS_THREAD
	if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
		log_err("fio: can't create TLS key\n");
#endif

	fio_clock_source_inited = fio_clock_source;

	if (calibrate_cpu_clock())
		tsc_reliable = 0;

	/*
	 * If the arch sets tsc_reliable != 0, then it must be good enough
	 * to use as THE clock source. For x86 CPUs, this means the TSC
	 * runs at a constant rate and is synced across CPU cores.
	 */
	if (tsc_reliable) {
		if (!fio_clock_source_set)
			fio_clock_source = CS_CPUCLOCK;
	} else if (fio_clock_source == CS_CPUCLOCK)
		log_info("fio: clocksource=cpu may not be reliable\n");
}

uint64_t utime_since(const struct timeval *s, const struct timeval *e)
{
	long sec, usec;
	uint64_t ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	ret = sec * 1000000ULL + usec;

	return ret;
}

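/*
 * Example of the borrow above (illustrative values): s = {10, 999999}
 * and e = {11, 1} give sec = 1, usec = -999998; normalizing yields
 * sec = 0, usec = 2, i.e. an elapsed time of 2 usec.
 */
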
uint64_t utime_since_now(const struct timeval *s)
{
	struct timeval t;

	fio_gettime(&t, NULL);
	return utime_since(s, &t);
}

uint64_t mtime_since(const struct timeval *s, const struct timeval *e)
{
	long sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	sec *= 1000;
	usec /= 1000;
	return sec + usec;
}

uint64_t mtime_since_now(const struct timeval *s)
{
	struct timeval t;
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
	return mtime_since(s, &t);
}

uint64_t time_since_now(const struct timeval *s)
{
	return mtime_since_now(s) / 1000;
}

#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
    defined(CONFIG_SFAA)

#define CLOCK_ENTRIES	100000

struct clock_entry {
	uint32_t seq;
	uint32_t cpu;
	uint64_t tsc;
};

struct clock_thread {
	pthread_t thread;
	int cpu;
	pthread_mutex_t lock;
	pthread_mutex_t started;
	uint32_t *seq;
	struct clock_entry *entries;
};

static inline uint32_t atomic32_inc_return(uint32_t *seq)
{
	/* Atomically bump the shared sequence, return the new value */
	return 1 + __sync_fetch_and_add(seq, 1);
}

static void *clock_thread_fn(void *data)
{
	struct clock_thread *t = data;
	struct clock_entry *c;
	os_cpu_mask_t cpu_mask;
	int i;

	memset(&cpu_mask, 0, sizeof(cpu_mask));
	fio_cpu_set(&cpu_mask, t->cpu);

	if (fio_setaffinity(gettid(), cpu_mask) == -1) {
		log_err("clock setaffinity failed\n");
		return (void *) 1;
	}

	pthread_mutex_lock(&t->lock);
	pthread_mutex_unlock(&t->started);

	c = &t->entries[0];
	for (i = 0; i < CLOCK_ENTRIES; i++, c++) {
		uint32_t seq;
		uint64_t tsc;

		c->cpu = t->cpu;
		do {
			seq = atomic32_inc_return(t->seq);
			tsc = get_cpu_clock();
		} while (seq != *t->seq);

		c->seq = seq;
		c->tsc = tsc;
	}

	log_info("cs: cpu%3d: %llu clocks seen\n", t->cpu,
		(unsigned long long) (t->entries[i - 1].tsc -
					t->entries[0].tsc));

	/*
	 * The most common platform clock breakage is returning zero
	 * indefinitely. Check for that and return failure.
	 */
	if (!t->entries[i - 1].tsc && !t->entries[0].tsc)
		return (void *) 1;

	return NULL;
}

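/*
 * A sketch of why this works (reasoning inferred from the code above):
 * each thread is pinned to one CPU and tags every TSC read with a
 * ticket from the shared atomic sequence counter. A sample is only
 * kept if the counter is unchanged after the read, i.e. no other CPU
 * took a ticket in between. Sorting all samples by ticket then yields
 * a global order in which the TSC values must be non-decreasing if
 * the CPU clocks are synchronized across cores; any inversion is
 * flagged as a mismatch below.
 */
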
static int clock_cmp(const void *p1, const void *p2)
{
	const struct clock_entry *c1 = p1;
	const struct clock_entry *c2 = p2;

	if (c1->seq == c2->seq)
		log_err("cs: bug in atomic sequence!\n");

	return c1->seq - c2->seq;
}

int fio_monotonic_clocktest(void)
{
	struct clock_thread *cthreads;
	unsigned int nr_cpus = cpus_online();
	struct clock_entry *entries;
	unsigned long tentries, failed = 0;
	struct clock_entry *prev, *this;
	uint32_t seq = 0;
	unsigned int i;

	log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");

	/* Enable time-related debug output during calibration */
	fio_debug |= 1U << FD_TIME;
	calibrate_cpu_clock();
	fio_debug &= ~(1U << FD_TIME);

	cthreads = malloc(nr_cpus * sizeof(struct clock_thread));
	tentries = CLOCK_ENTRIES * nr_cpus;
	entries = malloc(tentries * sizeof(struct clock_entry));

	log_info("cs: Testing %u CPUs\n", nr_cpus);

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		t->cpu = i;
		t->seq = &seq;
		t->entries = &entries[i * CLOCK_ENTRIES];
		pthread_mutex_init(&t->lock, NULL);
		pthread_mutex_init(&t->started, NULL);
		pthread_mutex_lock(&t->lock);
		if (pthread_create(&t->thread, NULL, clock_thread_fn, t)) {
			failed++;
			nr_cpus = i;
			break;
		}
	}

	/* Wait until every thread is pinned and ready */
	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		pthread_mutex_lock(&t->started);
	}

	/* Release all threads at once */
	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		pthread_mutex_unlock(&t->lock);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];
		void *ret;

		pthread_join(t->thread, &ret);
		if (ret)
			failed++;
	}
	free(cthreads);

	if (failed) {
		log_err("Clocksource test: %lu threads failed\n", failed);
		goto err;
	}

	qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);

	for (failed = i = 0; i < tentries; i++) {
		this = &entries[i];

		if (!i) {
			prev = this;
			continue;
		}

		if (prev->tsc > this->tsc) {
			uint64_t diff = prev->tsc - this->tsc;

			log_info("cs: CPU clock mismatch (diff=%llu):\n",
						(unsigned long long) diff);
			log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", prev->cpu,
				(unsigned long long) prev->tsc, prev->seq);
			log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", this->cpu,
				(unsigned long long) this->tsc, this->seq);
			failed++;
		}

		prev = this;
	}

	if (failed)
		log_info("cs: Failed: %lu\n", failed);
	else
		log_info("cs: Pass!\n");

err:
	free(entries);
	return !!failed;
}

#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) */

int fio_monotonic_clocktest(void)
{
	log_info("cs: current platform does not support CPU clocks\n");
	return 0;
}

#endif