/*
 * Clock functions
 */

#include <math.h>

#include "fio.h"
#include "smalloc.h"
#include "hash.h"
#include "os/os.h"

#if defined(ARCH_HAVE_CPU_CLOCK)
#ifndef ARCH_CPU_CLOCK_CYCLES_PER_USEC
static unsigned long long cycles_per_msec;
static unsigned long long cycles_start;
static unsigned long long clock_mult;
static unsigned long long max_cycles_mask;
static unsigned long long nsecs_for_max_cycles;
static unsigned int clock_shift;
static unsigned int max_cycles_shift;
#define MAX_CLOCK_SEC 60*60
#endif
#ifdef ARCH_CPU_CLOCK_WRAPS
static unsigned int cycles_wrap;
#endif
#endif
bool tsc_reliable = false;

struct tv_valid {
	int warned;
};
#ifdef ARCH_HAVE_CPU_CLOCK
#ifdef CONFIG_TLS_THREAD
static __thread struct tv_valid static_tv_valid;
#else
static pthread_key_t tv_tls_key;
#endif
#endif
enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
static enum fio_cs fio_clock_source_inited = CS_INVAL;
#ifdef FIO_DEBUG_TIME

#define HASH_BITS	8
#define HASH_SIZE	(1 << HASH_BITS)

static struct flist_head hash[HASH_SIZE];
static int gtod_inited;

struct gtod_log {
	struct flist_head list;
	void *caller;
	unsigned long calls;
};
static struct gtod_log *find_hash(void *caller)
{
	unsigned long h = hash_ptr(caller, HASH_BITS);
	struct flist_head *entry;

	flist_for_each(entry, &hash[h]) {
		struct gtod_log *log = flist_entry(entry, struct gtod_log,
									list);

		if (log->caller == caller)
			return log;
	}

	return NULL;
}
static void inc_caller(void *caller)
{
	struct gtod_log *log = find_hash(caller);

	if (!log) {
		unsigned long h;

		log = malloc(sizeof(*log));
		INIT_FLIST_HEAD(&log->list);
		log->caller = caller;
		log->calls = 0;

		h = hash_ptr(caller, HASH_BITS);
		flist_add_tail(&log->list, &hash[h]);
	}

	log->calls++;
}
static void gtod_log_caller(void *caller)
{
	if (gtod_inited)
		inc_caller(caller);
}
static void fio_exit fio_dump_gtod(void)
{
	unsigned long total_calls = 0;
	int i;

	for (i = 0; i < HASH_SIZE; i++) {
		struct flist_head *entry;
		struct gtod_log *log;

		flist_for_each(entry, &hash[i]) {
			log = flist_entry(entry, struct gtod_log, list);

			printf("function %p, calls %lu\n", log->caller,
							log->calls);
			total_calls += log->calls;
		}
	}

	printf("Total %lu gettimeofday\n", total_calls);
}
static void fio_init gtod_init(void)
{
	int i;

	for (i = 0; i < HASH_SIZE; i++)
		INIT_FLIST_HEAD(&hash[i]);

	gtod_inited = 1;
}

#endif /* FIO_DEBUG_TIME */
#ifdef CONFIG_CLOCK_GETTIME
static int fill_clock_gettime(struct timespec *ts)
{
#if defined(CONFIG_CLOCK_MONOTONIC_RAW)
	return clock_gettime(CLOCK_MONOTONIC_RAW, ts);
#elif defined(CONFIG_CLOCK_MONOTONIC)
	return clock_gettime(CLOCK_MONOTONIC, ts);
#else
	return clock_gettime(CLOCK_REALTIME, ts);
#endif
}
#endif
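
/*
 * Note: the cascade in fill_clock_gettime() prefers CLOCK_MONOTONIC_RAW
 * (not slewed by NTP/adjtime), then CLOCK_MONOTONIC, and only falls back
 * to CLOCK_REALTIME, which can jump when the wall clock is changed.
 */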
static void __fio_gettime(struct timespec *tp)
{
	switch (fio_clock_source) {
#ifdef CONFIG_GETTIMEOFDAY
	case CS_GTOD: {
		struct timeval tv;
		gettimeofday(&tv, NULL);

		tp->tv_sec = tv.tv_sec;
		tp->tv_nsec = tv.tv_usec * 1000;
		break;
		}
#endif
#ifdef CONFIG_CLOCK_GETTIME
	case CS_CGETTIME: {
		if (fill_clock_gettime(tp) < 0) {
			log_err("fio: clock_gettime fails\n");
			assert(0);
		}
		break;
		}
#endif
#ifdef ARCH_HAVE_CPU_CLOCK
	case CS_CPUCLOCK: {
		uint64_t nsecs, t, multiples;
		struct tv_valid *tv;

#ifdef CONFIG_TLS_THREAD
		tv = &static_tv_valid;
#else
		tv = pthread_getspecific(tv_tls_key);
#endif

		t = get_cpu_clock();
#ifdef ARCH_CPU_CLOCK_WRAPS
		if (t < cycles_start && !cycles_wrap)
			cycles_wrap = 1;
		else if (cycles_wrap && t >= cycles_start && !tv->warned) {
			log_err("fio: double CPU clock wrap\n");
			tv->warned = 1;
		}
#endif
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
		nsecs = t / ARCH_CPU_CLOCK_CYCLES_PER_USEC * 1000;
#else
		t -= cycles_start;
		multiples = t >> max_cycles_shift;
		nsecs = multiples * nsecs_for_max_cycles;
		nsecs += ((t & max_cycles_mask) * clock_mult) >> clock_shift;
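		/*
		 * Two-stage conversion, illustrated with assumed numbers
		 * (not from the source): at 2.5 GHz, MAX_CLOCK_SEC (one
		 * hour) is 9e12 ticks, so max_cycles_shift lands at 43
		 * (2^43 is about 8.8e12). The tick count is split into
		 * whole 2^43-tick chunks, each worth the precomputed
		 * nsecs_for_max_cycles, plus a remainder small enough
		 * that remainder * clock_mult cannot overflow 64 bits.
		 */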
#endif
		tp->tv_sec = nsecs / 1000000000ULL;
		tp->tv_nsec = nsecs % 1000000000ULL;
		break;
		}
#endif
	default:
		log_err("fio: invalid clock source %d\n", fio_clock_source);
		break;
	}
}
#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timespec *tp, void *caller)
#else
void fio_gettime(struct timespec *tp, void fio_unused *caller)
#endif
{
#ifdef FIO_DEBUG_TIME
	if (!caller)
		caller = __builtin_return_address(0);

	gtod_log_caller(caller);
#endif
	if (fio_unlikely(fio_gettime_offload(tp)))
		return;

	__fio_gettime(tp);
}
#if defined(ARCH_HAVE_CPU_CLOCK) && !defined(ARCH_CPU_CLOCK_CYCLES_PER_USEC)
static unsigned long get_cycles_per_msec(void)
{
	struct timespec s, e;
	uint64_t c_s, c_e;
	enum fio_cs old_cs = fio_clock_source;
	uint64_t elapsed;

#ifdef CONFIG_CLOCK_GETTIME
	fio_clock_source = CS_CGETTIME;
#else
	fio_clock_source = CS_GTOD;
#endif
	__fio_gettime(&s);

	c_s = get_cpu_clock();
	do {
		__fio_gettime(&e);
		c_e = get_cpu_clock();

		elapsed = ntime_since(&s, &e);
		if (elapsed >= 1280000)
			break;
	} while (1);

	fio_clock_source = old_cs;
	return (c_e - c_s) * 1000000 / elapsed;
}
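
/*
 * Note on the arithmetic above: the sampling window is at least
 * 1,280,000 ns (1.28 ms), and elapsed is in nanoseconds, so
 * cycles * 1,000,000 / elapsed_ns is cycles per millisecond. With
 * illustrative numbers, 3,200,000 cycles over 1,280,000 ns gives
 * 2,500,000 cycles/msec, i.e. a 2.5 GHz CPU clock.
 */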
#define NR_TIME_ITERS	50

static int calibrate_cpu_clock(void)
{
	double delta, mean, S;
	uint64_t minc, maxc, avg, cycles[NR_TIME_ITERS];
	int i, samples, sft = 0;
	unsigned long long tmp, max_ticks, max_mult;

	cycles[0] = get_cycles_per_msec();
	S = delta = mean = 0.0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		cycles[i] = get_cycles_per_msec();
		delta = cycles[i] - mean;
		if (delta) {
			mean += delta / (i + 1.0);
			S += delta * (cycles[i] - mean);
		}
	}
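
	/*
	 * The loop above is Welford's online algorithm: "mean" holds the
	 * running average of the samples and "S" the accumulated sum of
	 * squared deviations, so S / (N - 1) below is the sample variance
	 * and its square root the sample standard deviation.
	 */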
	/*
	 * The most common platform clock breakage is returning zero
	 * indefinitely. Check for that and return failure.
	 */
	if (!cycles[0] && !cycles[NR_TIME_ITERS - 1])
		return 1;

	S = sqrt(S / (NR_TIME_ITERS - 1.0));
	minc = -1ULL;
	maxc = samples = avg = 0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		double this = cycles[i];

		minc = min(cycles[i], minc);
		maxc = max(cycles[i], maxc);

		if ((fmax(this, mean) - fmin(this, mean)) > S)
			continue;

		samples++;
		avg += this;
	}

	S /= (double) NR_TIME_ITERS;

	for (i = 0; i < NR_TIME_ITERS; i++)
		dprint(FD_TIME, "cycles[%d]=%llu\n", i, (unsigned long long) cycles[i]);
	avg /= samples;
	cycles_per_msec = avg;
	dprint(FD_TIME, "min=%llu, max=%llu, mean=%f, S=%f, N=%d\n",
			(unsigned long long) minc,
			(unsigned long long) maxc, mean, S, NR_TIME_ITERS);
	dprint(FD_TIME, "trimmed mean=%llu, N=%d\n", (unsigned long long) avg, samples);

	max_ticks = MAX_CLOCK_SEC * cycles_per_msec * 1000ULL;
	max_mult = ULLONG_MAX / max_ticks;
	dprint(FD_TIME, "\n\nmax_ticks=%llu, __builtin_clzll=%d, "
			"max_mult=%llu\n", max_ticks,
			__builtin_clzll(max_ticks), max_mult);
	/*
	 * Find the largest shift count that will produce
	 * a multiplier that does not exceed max_mult.
	 */
	tmp = max_mult * cycles_per_msec / 1000000;
	while (tmp > 1) {
		tmp >>= 1;
		sft++;
		dprint(FD_TIME, "tmp=%llu, sft=%u\n", tmp, sft);
	}

	clock_shift = sft;
	clock_mult = (1ULL << sft) * 1000000 / cycles_per_msec;
	dprint(FD_TIME, "clock_shift=%u, clock_mult=%llu\n", clock_shift,
							clock_mult);
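
	/*
	 * Worked example with assumed numbers (not from the source): if
	 * cycles_per_msec is 2,500,000 (2.5 GHz), then max_ticks is 9e12
	 * and max_mult about 2,049,638, so the loop above settles on
	 * sft = 22. That gives clock_mult = (1 << 22) * 1,000,000 /
	 * 2,500,000 = 1,677,721, and a tick count t converts to
	 * nanoseconds as (t * 1,677,721) >> 22, i.e. roughly t * 0.4 ns,
	 * with no division on the fast path.
	 */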
	/*
	 * Find the greatest power of 2 clock ticks that is less than the
	 * ticks in MAX_CLOCK_SEC.
	 */
	max_cycles_shift = max_cycles_mask = 0;
	tmp = MAX_CLOCK_SEC * 1000ULL * cycles_per_msec;
	dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp,
							max_cycles_shift);
	while (tmp > 1) {
		tmp >>= 1;
		max_cycles_shift++;
		dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp, max_cycles_shift);
	}

	/*
	 * If we use (1ULL << max_cycles_shift) * 1000 / cycles_per_msec
	 * here we will have a discontinuity every
	 * (1ULL << max_cycles_shift) cycles.
	 */
	nsecs_for_max_cycles = ((1ULL << max_cycles_shift) * clock_mult)
					>> clock_shift;

	/* Use a bitmask to calculate ticks % (1ULL << max_cycles_shift) */
	for (tmp = 0; tmp < max_cycles_shift; tmp++)
		max_cycles_mask |= 1ULL << tmp;
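
	/*
	 * The loop above simply builds (1ULL << max_cycles_shift) - 1,
	 * e.g. a shift of 4 yields mask 0xf, so t & max_cycles_mask is
	 * t modulo 2^max_cycles_shift.
	 */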
	dprint(FD_TIME, "max_cycles_shift=%u, 2^max_cycles_shift=%llu, "
			"nsecs_for_max_cycles=%llu, "
			"max_cycles_mask=%016llx\n",
			max_cycles_shift, (1ULL << max_cycles_shift),
			nsecs_for_max_cycles, max_cycles_mask);

	cycles_start = get_cpu_clock();
	dprint(FD_TIME, "cycles_start=%llu\n", cycles_start);
	return 0;
}
#else
static int calibrate_cpu_clock(void)
{
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
	return 0;
#else
	return 1;
#endif
}
#endif // ARCH_HAVE_CPU_CLOCK
#ifndef CONFIG_TLS_THREAD
void fio_local_clock_init(void)
{
	struct tv_valid *t;

	t = calloc(1, sizeof(*t));
	if (pthread_setspecific(tv_tls_key, t)) {
		log_err("fio: can't set TLS key\n");
		assert(0);
	}
}

static void kill_tv_tls_key(void *data)
{
	free(data);
}
#else
void fio_local_clock_init(void)
{
}
#endif
void fio_clock_init(void)
{
	if (fio_clock_source == fio_clock_source_inited)
		return;

#ifndef CONFIG_TLS_THREAD
	if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
		log_err("fio: can't create TLS key\n");
#endif

	fio_clock_source_inited = fio_clock_source;

	if (calibrate_cpu_clock())
		tsc_reliable = false;

	/*
	 * If the arch sets tsc_reliable != 0, then it must be good enough
	 * to use as THE clock source. For x86 CPUs, this means the TSC
	 * runs at a constant rate and is synced across CPU cores.
	 */
	if (tsc_reliable) {
		if (!fio_clock_source_set && !fio_monotonic_clocktest(0))
			fio_clock_source = CS_CPUCLOCK;
	} else if (fio_clock_source == CS_CPUCLOCK)
		log_info("fio: clocksource=cpu may not be reliable\n");
	dprint(FD_TIME, "gettime: clocksource=%d\n", (int) fio_clock_source);
}
uint64_t ntime_since(const struct timespec *s, const struct timespec *e)
{
	int64_t sec, nsec;

	sec = e->tv_sec - s->tv_sec;
	nsec = e->tv_nsec - s->tv_nsec;
	if (sec > 0 && nsec < 0) {
		sec--;
		nsec += 1000000000LL;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && nsec < 0))
		return 0;

	return nsec + (sec * 1000000000LL);
}
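
/*
 * Example of the borrow in ntime_since() (illustrative values):
 * s = {1, 900000000} and e = {2, 100000000} give sec = 1 and
 * nsec = -800000000; after the adjustment sec = 0 and
 * nsec = 200000000, so the result is 200000000 ns.
 */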
uint64_t ntime_since_now(const struct timespec *s)
{
	struct timespec now;

	fio_gettime(&now, NULL);
	return ntime_since(s, &now);
}
uint64_t utime_since(const struct timespec *s, const struct timespec *e)
{
	int64_t sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_nsec - s->tv_nsec) / 1000;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	return usec + (sec * 1000000);
}
uint64_t utime_since_now(const struct timespec *s)
{
	struct timespec t;
#ifdef FIO_DEBUG_TIME
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
#else
	fio_gettime(&t, NULL);
#endif

	return utime_since(s, &t);
}
uint64_t mtime_since_tv(const struct timeval *s, const struct timeval *e)
{
	int64_t sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_usec - s->tv_usec);
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	sec *= 1000;
	usec /= 1000;
	return sec + usec;
}
uint64_t mtime_since_now(const struct timespec *s)
{
	struct timespec t;
#ifdef FIO_DEBUG_TIME
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
#else
	fio_gettime(&t, NULL);
#endif

	return mtime_since(s, &t);
}
uint64_t mtime_since(const struct timespec *s, const struct timespec *e)
{
	int64_t sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_nsec - s->tv_nsec) / 1000;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	sec *= 1000;
	usec /= 1000;
	return sec + usec;
}

uint64_t time_since_now(const struct timespec *s)
{
	return mtime_since_now(s) / 1000;
}
#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
    defined(CONFIG_SYNC_SYNC) && defined(CONFIG_CMP_SWAP)

#define CLOCK_ENTRIES_DEBUG	100000
#define CLOCK_ENTRIES_TEST	1000

struct clock_entry {
	uint32_t seq;
	uint32_t cpu;
	uint64_t tsc;
};

struct clock_thread {
	pthread_t thread;
	int cpu;
	int debug;
	struct fio_sem lock;
	unsigned long nr_entries;
	uint32_t *seq;
	struct clock_entry *entries;
};
static inline uint32_t atomic32_compare_and_swap(uint32_t *ptr, uint32_t old,
						 uint32_t new)
{
	return __sync_val_compare_and_swap(ptr, old, new);
}
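
/*
 * __sync_val_compare_and_swap() returns the value *ptr held before the
 * operation, so the swap succeeded exactly when the return value equals
 * the expected "old" value.
 */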
static void *clock_thread_fn(void *data)
{
	struct clock_thread *t = data;
	struct clock_entry *c;
	os_cpu_mask_t cpu_mask;
	unsigned long long first;
	int i;

	if (fio_cpuset_init(&cpu_mask)) {
		int __err = errno;

		log_err("clock cpuset init failed: %s\n", strerror(__err));
		goto err_out;
	}

	fio_cpu_set(&cpu_mask, t->cpu);

	if (fio_setaffinity(gettid(), cpu_mask) == -1) {
		int __err = errno;

		log_err("clock setaffinity failed: %s\n", strerror(__err));
		goto err;
	}

	fio_sem_down(&t->lock);

	first = get_cpu_clock();
	c = &t->entries[0];
	for (i = 0; i < t->nr_entries; i++, c++) {
		uint32_t seq;
		uint64_t tsc;

		c->cpu = t->cpu;
		do {
			seq = *t->seq;
			if (seq == UINT_MAX)
				break;
			__sync_synchronize();
			tsc = get_cpu_clock();
		} while (seq != atomic32_compare_and_swap(t->seq, seq, seq + 1));
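		/*
		 * The loop above tags each TSC read with a unique global
		 * sequence number: read the current seq, fence, read the
		 * TSC, then try to claim seq with a compare-and-swap. If
		 * another CPU claimed it first, both reads are retried,
		 * so sorting the entries by seq later reconstructs the
		 * order in which the TSC samples were taken.
		 */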
		if (seq == UINT_MAX)
			break;

		c->seq = seq;
		c->tsc = tsc;
	}

	if (t->debug) {
		unsigned long long clocks;

		clocks = t->entries[i - 1].tsc - t->entries[0].tsc;
		log_info("cs: cpu%3d: %llu clocks seen, first %llu\n", t->cpu,
							clocks, first);
	}

	/*
	 * The most common platform clock breakage is returning zero
	 * indefinitely. Check for that and return failure.
	 */
	if (i > 1 && !t->entries[i - 1].tsc && !t->entries[0].tsc)
		goto err;

	fio_cpuset_exit(&cpu_mask);
	return NULL;
err:
	fio_cpuset_exit(&cpu_mask);
err_out:
	return (void *) 1;
}
static int clock_cmp(const void *p1, const void *p2)
{
	const struct clock_entry *c1 = p1;
	const struct clock_entry *c2 = p2;

	if (c1->seq == c2->seq)
		log_err("cs: bug in atomic sequence!\n");

	return c1->seq - c2->seq;
}
int fio_monotonic_clocktest(int debug)
{
	struct clock_thread *cthreads;
	unsigned int nr_cpus = cpus_online();
	struct clock_entry *entries;
	unsigned long nr_entries, tentries, failed = 0;
	struct clock_entry *prev, *this;
	uint32_t seq = 0;
	unsigned int i;

	if (debug) {
		log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");

#ifdef FIO_INC_DEBUG
		fio_debug |= 1U << FD_TIME;
#endif
		nr_entries = CLOCK_ENTRIES_DEBUG;
	} else
		nr_entries = CLOCK_ENTRIES_TEST;

	calibrate_cpu_clock();

	if (debug) {
#ifdef FIO_INC_DEBUG
		fio_debug &= ~(1U << FD_TIME);
#endif
	}

	cthreads = malloc(nr_cpus * sizeof(struct clock_thread));
	tentries = nr_entries * nr_cpus;
	entries = malloc(tentries * sizeof(struct clock_entry));

	if (debug)
		log_info("cs: Testing %u CPUs\n", nr_cpus);

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		t->cpu = i;
		t->debug = debug;
		t->seq = &seq;
		t->nr_entries = nr_entries;
		t->entries = &entries[i * nr_entries];
		__fio_sem_init(&t->lock, FIO_SEM_LOCKED);
		if (pthread_create(&t->thread, NULL, clock_thread_fn, t)) {
			failed++;
			nr_cpus = i;
			break;
		}
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		fio_sem_up(&t->lock);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];
		void *ret;

		pthread_join(t->thread, &ret);
		if (ret)
			failed++;
		__fio_sem_remove(&t->lock);
	}
	free(cthreads);

	if (failed) {
		if (debug)
			log_err("Clocksource test: %lu threads failed\n", failed);
		goto err;
	}

	qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);
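
	/*
	 * Sorting by sequence number interleaves the per-CPU samples into
	 * the single global order in which they were claimed; on a sane
	 * clocksource the TSC values must be non-decreasing in that order,
	 * which the scan below verifies.
	 */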
	/* silence silly gcc */
	this = NULL;
	for (failed = i = 0; i < tentries; i++) {
		this = &entries[i];

		if (!i) {
			prev = this;
			continue;
		}

		if (prev->tsc > this->tsc) {
			uint64_t diff = prev->tsc - this->tsc;

			if (!debug) {
				failed++;
				break;
			}

			log_info("cs: CPU clock mismatch (diff=%llu):\n",
						(unsigned long long) diff);
			log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", prev->cpu, (unsigned long long) prev->tsc, prev->seq);
			log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", this->cpu, (unsigned long long) this->tsc, this->seq);
			failed++;
		}

		prev = this;
	}

	if (debug) {
		if (failed)
			log_info("cs: Failed: %lu\n", failed);
		else
			log_info("cs: Pass!\n");
	}
err:
	free(entries);
	return !!failed;
}
#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) */

int fio_monotonic_clocktest(int debug)
{
	if (debug)
		log_info("cs: current platform does not support CPU clocks\n");

	return 1;
}

#endif