#if defined(ARCH_HAVE_CPU_CLOCK)
#ifndef ARCH_CPU_CLOCK_CYCLES_PER_USEC
static unsigned long cycles_per_msec;
static unsigned long long cycles_start;
static unsigned long long clock_mult;
static unsigned long long max_cycles_mask;
static unsigned long long nsecs_for_max_cycles;
static unsigned int clock_shift;
static unsigned int max_cycles_shift;
#define MAX_CLOCK_SEC	(60 * 60)
#endif
#ifdef ARCH_CPU_CLOCK_WRAPS
static unsigned int cycles_wrap;
#endif
#endif
int tsc_reliable = 0;

struct tv_valid {
        int warned;
};
#ifdef ARCH_HAVE_CPU_CLOCK
#ifdef CONFIG_TLS_THREAD
static __thread struct tv_valid static_tv_valid;
#else
static pthread_key_t tv_tls_key;
#endif
#endif

enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
static enum fio_cs fio_clock_source_inited = CS_INVAL;
#ifdef FIO_DEBUG_TIME

#define HASH_BITS	8
#define HASH_SIZE	(1 << HASH_BITS)

static struct flist_head hash[HASH_SIZE];
static int gtod_inited;

struct gtod_log {
        struct flist_head list;
        void *caller;
        unsigned long calls;
};
static struct gtod_log *find_hash(void *caller)
{
        unsigned long h = hash_ptr(caller, HASH_BITS);
        struct flist_head *entry;

        flist_for_each(entry, &hash[h]) {
                struct gtod_log *log = flist_entry(entry, struct gtod_log,
                                                        list);

                if (log->caller == caller)
                        return log;
        }

        return NULL;
}
78 static void inc_caller(void *caller)
80 struct gtod_log *log = find_hash(caller);
85 log = malloc(sizeof(*log));
86 INIT_FLIST_HEAD(&log->list);
90 h = hash_ptr(caller, HASH_BITS);
91 flist_add_tail(&log->list, &hash[h]);
static void gtod_log_caller(void *caller)
{
        if (gtod_inited)
                inc_caller(caller);
}
static void fio_exit fio_dump_gtod(void)
{
        unsigned long total_calls = 0;
        int i;

        for (i = 0; i < HASH_SIZE; i++) {
                struct flist_head *entry;
                struct gtod_log *log;

                flist_for_each(entry, &hash[i]) {
                        log = flist_entry(entry, struct gtod_log, list);

                        printf("function %p, calls %lu\n", log->caller,
                                                        log->calls);
                        total_calls += log->calls;
                }
        }

        printf("Total %lu gettimeofday\n", total_calls);
}
static void fio_init gtod_init(void)
{
        int i;

        for (i = 0; i < HASH_SIZE; i++)
                INIT_FLIST_HEAD(&hash[i]);

        gtod_inited = 1;
}

#endif /* FIO_DEBUG_TIME */
#ifdef CONFIG_CLOCK_GETTIME
static int fill_clock_gettime(struct timespec *ts)
{
#if defined(CONFIG_CLOCK_MONOTONIC_RAW)
        return clock_gettime(CLOCK_MONOTONIC_RAW, ts);
#elif defined(CONFIG_CLOCK_MONOTONIC)
        return clock_gettime(CLOCK_MONOTONIC, ts);
#else
        return clock_gettime(CLOCK_REALTIME, ts);
#endif
}
#endif
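/*
 * Note on the fallback order above: CLOCK_MONOTONIC_RAW is preferred
 * because it is not subject to NTP rate adjustment, CLOCK_MONOTONIC is
 * the next best thing, and CLOCK_REALTIME is the last resort since it
 * can jump when the wall clock is stepped.
 */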
static void __fio_gettime(struct timespec *tp)
{
        switch (fio_clock_source) {
#ifdef CONFIG_GETTIMEOFDAY
        case CS_GTOD: {
                struct timeval tv;

                gettimeofday(&tv, NULL);
                tp->tv_sec = tv.tv_sec;
                tp->tv_nsec = tv.tv_usec * 1000;
                break;
                }
#endif
#ifdef CONFIG_CLOCK_GETTIME
        case CS_CGETTIME: {
                if (fill_clock_gettime(tp) < 0) {
                        log_err("fio: clock_gettime fails\n");
                        assert(0);
                }
                break;
                }
#endif
#ifdef ARCH_HAVE_CPU_CLOCK
        case CS_CPUCLOCK: {
                uint64_t nsecs, t, multiples;
                struct tv_valid *tv;

#ifdef CONFIG_TLS_THREAD
                tv = &static_tv_valid;
#else
                tv = pthread_getspecific(tv_tls_key);
#endif

                t = get_cpu_clock();
#ifdef ARCH_CPU_CLOCK_WRAPS
                if (t < cycles_start && !cycles_wrap)
                        cycles_wrap = 1;
                else if (cycles_wrap && t >= cycles_start && !tv->warned) {
                        log_err("fio: double CPU clock wrap\n");
                        tv->warned = 1;
                }
#endif
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
                nsecs = t / ARCH_CPU_CLOCK_CYCLES_PER_USEC * 1000;
#else
                t -= cycles_start;
                multiples = t >> max_cycles_shift;
                nsecs = multiples * nsecs_for_max_cycles;
                nsecs += ((t & max_cycles_mask) * clock_mult) >> clock_shift;
#endif

                tp->tv_sec = nsecs / 1000000000ULL;
                tp->tv_nsec = nsecs % 1000000000ULL;
                break;
                }
#endif
        default:
                log_err("fio: invalid clock source %d\n", fio_clock_source);
                break;
        }
}
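/*
 * Worked example of the two-stage conversion above (illustrative numbers,
 * assuming calibration found cycles_per_msec = 2,600,000, i.e. a 2.6 GHz
 * clock, which yields clock_shift = 22, clock_mult = 1,613,193 and
 * max_cycles_shift = 43): a delta of t = 2^44 cycles splits into
 * multiples = 2 chunks of 2^43 cycles, each worth nsecs_for_max_cycles
 * nanoseconds, plus a remainder of t & max_cycles_mask cycles that is
 * converted with the overflow-safe (rem * clock_mult) >> clock_shift.
 */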
#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timespec *tp, void *caller)
#else
void fio_gettime(struct timespec *tp, void fio_unused *caller)
#endif
{
#ifdef FIO_DEBUG_TIME
        if (!caller)
                caller = __builtin_return_address(0);

        gtod_log_caller(caller);
#endif
        if (fio_unlikely(fio_gettime_offload(tp)))
                return;

        __fio_gettime(tp);
}
#if defined(ARCH_HAVE_CPU_CLOCK) && !defined(ARCH_CPU_CLOCK_CYCLES_PER_USEC)
static unsigned long get_cycles_per_msec(void)
{
        struct timespec s, e;
        uint64_t c_s, c_e;
        enum fio_cs old_cs = fio_clock_source;
        uint64_t elapsed;

#ifdef CONFIG_CLOCK_GETTIME
        fio_clock_source = CS_CGETTIME;
#else
        fio_clock_source = CS_GTOD;
#endif
        __fio_gettime(&s);

        c_s = get_cpu_clock();
        do {
                __fio_gettime(&e);

                elapsed = utime_since(&s, &e);
                if (elapsed >= 1280) {
                        c_e = get_cpu_clock();
                        break;
                }
        } while (1);

        fio_clock_source = old_cs;
        return (c_e - c_s) * 1000 / elapsed;
}
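/*
 * Example of the arithmetic above (illustrative numbers only): if the
 * reference clock reports elapsed = 1280 usec and the CPU clock advanced
 * by c_e - c_s = 3,328,000 cycles over that window, the result is
 * 3,328,000 * 1000 / 1280 = 2,600,000 cycles per msec, i.e. a 2.6 GHz
 * clock. Requiring at least 1280 usec keeps the sampling window long
 * enough that the cost of the clock reads themselves is noise.
 */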
#define NR_TIME_ITERS	50

static int calibrate_cpu_clock(void)
{
        double delta, mean, S;
        uint64_t minc, maxc, avg, cycles[NR_TIME_ITERS];
        int i, samples, sft = 0;
        unsigned long long tmp, max_ticks, max_mult;

        cycles[0] = get_cycles_per_msec();

        S = delta = mean = 0.0;
        for (i = 0; i < NR_TIME_ITERS; i++) {
                cycles[i] = get_cycles_per_msec();
                delta = cycles[i] - mean;
                if (delta) {
                        mean += delta / (i + 1.0);
                        S += delta * (cycles[i] - mean);
                }
        }
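        /*
         * The loop above is Welford's online algorithm: after n samples,
         * mean holds the running average and S the sum of squared
         * deviations, so the sample variance is S / (n - 1). The running
         * form avoids a second pass over cycles[] and is numerically
         * stabler than accumulating x and x^2 separately.
         */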
        /*
         * The most common platform clock breakage is returning zero
         * indefinitely. Check for that and return failure.
         */
        if (!cycles[0] && !cycles[NR_TIME_ITERS - 1])
                return 1;

        S = sqrt(S / (NR_TIME_ITERS - 1.0));
        minc = -1ULL;
        maxc = samples = avg = 0;
        for (i = 0; i < NR_TIME_ITERS; i++) {
                double this = cycles[i];

                minc = min(cycles[i], minc);
                maxc = max(cycles[i], maxc);

                if ((fmax(this, mean) - fmin(this, mean)) > S)
                        continue;
                samples++;
                avg += this;
        }

        S /= (double) NR_TIME_ITERS;
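        /*
         * The pass above keeps only samples within one standard deviation
         * of the mean: fmax(this, mean) - fmin(this, mean) is simply
         * |this - mean|, so outliers (e.g. a sample perturbed by an
         * interrupt or a migration) are excluded from the average.
         */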
        for (i = 0; i < NR_TIME_ITERS; i++)
                dprint(FD_TIME, "cycles[%d]=%llu\n", i,
                                        (unsigned long long) cycles[i]);

        avg /= samples;
        cycles_per_msec = avg;
        dprint(FD_TIME, "avg: %llu\n", (unsigned long long) avg);
        dprint(FD_TIME, "min=%llu, max=%llu, mean=%f, S=%f\n",
                        (unsigned long long) minc,
                        (unsigned long long) maxc, mean, S);
        max_ticks = MAX_CLOCK_SEC * cycles_per_msec * 1000ULL;
        max_mult = ULLONG_MAX / max_ticks;
        dprint(FD_TIME, "\n\nmax_ticks=%llu, __builtin_clzll=%d, max_mult=%llu\n",
                        max_ticks, __builtin_clzll(max_ticks), max_mult);

        /*
         * Find the largest shift count that will produce
         * a multiplier that does not exceed max_mult
         */
        tmp = max_mult * cycles_per_msec / 1000000;
        while (tmp > 1) {
                tmp >>= 1;
                sft++;
                dprint(FD_TIME, "tmp=%llu, sft=%u\n", tmp, sft);
        }

        clock_shift = sft;
        clock_mult = (1ULL << sft) * 1000000 / cycles_per_msec;
        dprint(FD_TIME, "clock_shift=%u, clock_mult=%llu\n", clock_shift,
                                                        clock_mult);
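        /*
         * Worked example with illustrative numbers: for cycles_per_msec =
         * 2,600,000 (2.6 GHz), max_ticks is one hour of cycles, about
         * 9.36e12, and max_mult = ULLONG_MAX / max_ticks is about 1.97e6.
         * The loop settles on sft = 22, so clock_mult =
         * (2^22 * 1,000,000) / 2,600,000 = 1,613,193. A tick count then
         * converts to nanoseconds as (ticks * 1613193) >> 22, i.e.
         * ticks * 0.3846... ns, and ticks * clock_mult cannot overflow
         * 64 bits for any value up to max_ticks.
         */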
        // Find the greatest power of 2 clock ticks that is less than the
        // ticks in MAX_CLOCK_SEC
        max_cycles_shift = max_cycles_mask = 0;
        tmp = MAX_CLOCK_SEC * 1000ULL * cycles_per_msec;
        dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp, max_cycles_shift);
        while (tmp > 1) {
                tmp >>= 1;
                max_cycles_shift++;
                dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp, max_cycles_shift);
        }

        // If we used (1ULL << max_cycles_shift) * 1000 / cycles_per_msec here
        // we would have a discontinuity every (1ULL << max_cycles_shift) cycles
        nsecs_for_max_cycles = ((1ULL << max_cycles_shift) * clock_mult)
                                        >> clock_shift;

        // Use a bitmask to calculate ticks % (1ULL << max_cycles_shift)
        for (tmp = 0; tmp < max_cycles_shift; tmp++)
                max_cycles_mask |= 1ULL << tmp;
        dprint(FD_TIME, "max_cycles_shift=%u, 2^max_cycles_shift=%llu, nsecs_for_max_cycles=%llu, max_cycles_mask=%016llx\n",
                        max_cycles_shift, (1ULL << max_cycles_shift),
                        nsecs_for_max_cycles, max_cycles_mask);

        cycles_start = get_cpu_clock();
        dprint(FD_TIME, "cycles_start=%llu\n", cycles_start);
        return 0;
}
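/*
 * Continuing the 2.6 GHz example (illustrative numbers): tmp starts at
 * MAX_CLOCK_SEC * 1000 * cycles_per_msec, about 9.36e12, so the loop
 * leaves max_cycles_shift = 43. Each 2^43-cycle chunk is worth
 * nsecs_for_max_cycles = (2^43 * 1,613,193) >> 22 nanoseconds, roughly
 * 3383 seconds, and max_cycles_mask = 2^43 - 1 extracts the remainder
 * that __fio_gettime() converts with the same mult/shift pair, so the
 * two stages agree exactly at every chunk boundary.
 */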
#else
static int calibrate_cpu_clock(void)
{
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
        tsc_reliable = true;
#endif
        return 0;
}
#endif // ARCH_HAVE_CPU_CLOCK
#ifndef CONFIG_TLS_THREAD
void fio_local_clock_init(int is_thread)
{
        struct tv_valid *t;

        t = calloc(1, sizeof(*t));
        if (pthread_setspecific(tv_tls_key, t)) {
                log_err("fio: can't set TLS key\n");
                assert(0);
        }
}

static void kill_tv_tls_key(void *data)
{
        free(data);
}
#else
void fio_local_clock_init(int is_thread)
{
}
#endif
void fio_clock_init(void)
{
        if (fio_clock_source == fio_clock_source_inited)
                return;

#ifndef CONFIG_TLS_THREAD
        if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
                log_err("fio: can't create TLS key\n");
#endif

        fio_clock_source_inited = fio_clock_source;

        if (calibrate_cpu_clock())
                tsc_reliable = false;

        /*
         * If the arch sets tsc_reliable != 0, then it must be good enough
         * to use as THE clock source. For x86 CPUs, this means the TSC
         * runs at a constant rate and is synced across CPU cores.
         */
        if (tsc_reliable) {
                if (!fio_clock_source_set && !fio_monotonic_clocktest(0))
                        fio_clock_source = CS_CPUCLOCK;
        } else if (fio_clock_source == CS_CPUCLOCK)
                log_info("fio: clocksource=cpu may not be reliable\n");
}
uint64_t ntime_since(const struct timespec *s, const struct timespec *e)
{
        int64_t sec, nsec;

        sec = e->tv_sec - s->tv_sec;
        nsec = e->tv_nsec - s->tv_nsec;
        if (sec > 0 && nsec < 0) {
                sec--;
                nsec += 1000000000LL;
        }

        /*
         * time warp bug on some kernels?
         */
        if (sec < 0 || (sec == 0 && nsec < 0))
                return 0;

        return nsec + (sec * 1000000000LL);
}
uint64_t utime_since(const struct timespec *s, const struct timespec *e)
{
        int64_t sec, usec;

        sec = e->tv_sec - s->tv_sec;
        usec = (e->tv_nsec - s->tv_nsec) / 1000;
        if (sec > 0 && usec < 0) {
                sec--;
                usec += 1000000;
        }

        /*
         * time warp bug on some kernels?
         */
        if (sec < 0 || (sec == 0 && usec < 0))
                return 0;

        return usec + (sec * 1000000);
}
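/*
 * Typical usage of the pair (hypothetical snippet, not part of fio;
 * do_some_io() is a placeholder):
 *
 *	struct timespec start, end;
 *
 *	fio_gettime(&start, NULL);
 *	do_some_io();
 *	fio_gettime(&end, NULL);
 *	log_info("took %llu usec\n",
 *		 (unsigned long long) utime_since(&start, &end));
 *
 * utime_since_now() below is shorthand for the same computation against
 * the current time.
 */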
uint64_t utime_since_now(const struct timespec *s)
{
        struct timespec t;
#ifdef FIO_DEBUG_TIME
        void *p = __builtin_return_address(0);

        fio_gettime(&t, p);
#else
        fio_gettime(&t, NULL);
#endif

        return utime_since(s, &t);
}
uint64_t mtime_since_tv(const struct timeval *s, const struct timeval *e)
{
        int64_t sec, usec;

        sec = e->tv_sec - s->tv_sec;
        usec = e->tv_usec - s->tv_usec;
        if (sec > 0 && usec < 0) {
                sec--;
                usec += 1000000;
        }

        if (sec < 0 || (sec == 0 && usec < 0))
                return 0;

        sec *= 1000;
        usec /= 1000;
        return sec + usec;
}
uint64_t mtime_since_now(const struct timespec *s)
{
        struct timespec t;
#ifdef FIO_DEBUG_TIME
        void *p = __builtin_return_address(0);

        fio_gettime(&t, p);
#else
        fio_gettime(&t, NULL);
#endif

        return mtime_since(s, &t);
}
uint64_t mtime_since(const struct timespec *s, const struct timespec *e)
{
        int64_t sec, usec;

        sec = e->tv_sec - s->tv_sec;
        usec = (e->tv_nsec - s->tv_nsec) / 1000;
        if (sec > 0 && usec < 0) {
                sec--;
                usec += 1000000;
        }

        if (sec < 0 || (sec == 0 && usec < 0))
                return 0;

        sec *= 1000;
        usec /= 1000;
        return sec + usec;
}
uint64_t time_since_now(const struct timespec *s)
{
        return mtime_since_now(s) / 1000;
}
#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
    defined(CONFIG_SFAA)

#define CLOCK_ENTRIES_DEBUG	100000
#define CLOCK_ENTRIES_TEST	10000

struct clock_entry {
        uint32_t seq;
        uint32_t cpu;
        uint64_t tsc;
};

struct clock_thread {
        pthread_t thread;
        int cpu;
        int debug;
        pthread_mutex_t lock;
        pthread_mutex_t started;
        unsigned long nr_entries;
        uint32_t *seq;
        struct clock_entry *entries;
};
static inline uint32_t atomic32_inc_return(uint32_t *seq)
{
        return 1 + __sync_fetch_and_add(seq, 1);
}
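/*
 * __sync_fetch_and_add() returns the value *seq held before the add, so
 * adding 1 yields the post-increment value. Each caller thus gets a
 * unique, totally ordered ticket even when many CPUs increment the
 * shared sequence concurrently.
 */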
static void *clock_thread_fn(void *data)
{
        struct clock_thread *t = data;
        struct clock_entry *c;
        os_cpu_mask_t cpu_mask;
        uint32_t last_seq;
        unsigned long long first;
        int i;

        if (fio_cpuset_init(&cpu_mask)) {
                int __err = errno;

                log_err("clock cpuset init failed: %s\n", strerror(__err));
                goto err_out;
        }

        fio_cpu_set(&cpu_mask, t->cpu);

        if (fio_setaffinity(gettid(), cpu_mask) == -1) {
                int __err = errno;

                log_err("clock setaffinity failed: %s\n", strerror(__err));
                goto err;
        }

        pthread_mutex_lock(&t->lock);
        pthread_mutex_unlock(&t->started);

        first = get_cpu_clock();
        last_seq = 0;
        c = &t->entries[0];
        for (i = 0; i < t->nr_entries; i++, c++) {
                uint32_t seq;
                uint64_t tsc;

                c->cpu = t->cpu;
                do {
                        seq = atomic32_inc_return(t->seq);
                        if (seq < last_seq)
                                break;
                        tsc = get_cpu_clock();
                } while (seq != *t->seq);

                c->seq = seq;
                c->tsc = tsc;
        }
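        /*
         * The retry loop above pairs each sequence ticket with a TSC read:
         * if another CPU bumped the shared sequence between this thread's
         * increment and its get_cpu_clock() call, seq no longer equals
         * *t->seq and the pair is resampled. An entry is only recorded if
         * the clock was read while its ticket was still the latest, which
         * is what makes the global sort-by-sequence comparison below
         * meaningful.
         */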
        if (t->debug) {
                unsigned long long clocks;

                clocks = t->entries[i - 1].tsc - t->entries[0].tsc;
                log_info("cs: cpu%3d: %llu clocks seen, first %llu\n",
                                        t->cpu, clocks, first);
        }

        /*
         * The most common platform clock breakage is returning zero
         * indefinitely. Check for that and return failure.
         */
        if (!t->entries[i - 1].tsc && !t->entries[0].tsc)
                goto err;

        fio_cpuset_exit(&cpu_mask);
        return NULL;
err:
        fio_cpuset_exit(&cpu_mask);
err_out:
        return (void *) 1;
}
static int clock_cmp(const void *p1, const void *p2)
{
        const struct clock_entry *c1 = p1;
        const struct clock_entry *c2 = p2;

        if (c1->seq == c2->seq)
                log_err("cs: bug in atomic sequence!\n");

        return c1->seq - c2->seq;
}
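/*
 * Sorting by sequence number reconstructs the global order in which the
 * samples were taken across all CPUs; the verification pass below then
 * requires the TSC values to be non-decreasing in that order. Two equal
 * seq values mean the atomic increment is broken. The subtraction is
 * safe here because one test run consumes far fewer tickets than would
 * be needed for the uint32_t difference to wrap the int result.
 */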
int fio_monotonic_clocktest(int debug)
{
        struct clock_thread *cthreads;
        unsigned int nr_cpus = cpus_online();
        struct clock_entry *entries;
        unsigned long nr_entries, tentries, failed = 0;
        struct clock_entry *prev, *this;
        uint32_t seq = 0;
        unsigned int i;

        if (debug) {
                log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");

#ifdef FIO_INC_DEBUG
                fio_debug |= 1U << FD_TIME;
#endif
                nr_entries = CLOCK_ENTRIES_DEBUG;
        } else
                nr_entries = CLOCK_ENTRIES_TEST;

        calibrate_cpu_clock();

        if (debug) {
#ifdef FIO_INC_DEBUG
                fio_debug &= ~(1U << FD_TIME);
#endif
        }

        cthreads = malloc(nr_cpus * sizeof(struct clock_thread));
        tentries = nr_entries * nr_cpus;
        entries = malloc(tentries * sizeof(struct clock_entry));
        if (debug)
                log_info("cs: Testing %u CPUs\n", nr_cpus);

        for (i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &cthreads[i];

                t->cpu = i;
                t->debug = debug;
                t->seq = &seq;
                t->nr_entries = nr_entries;
                t->entries = &entries[i * nr_entries];
                pthread_mutex_init(&t->lock, NULL);
                pthread_mutex_init(&t->started, NULL);
                pthread_mutex_lock(&t->lock);
                if (pthread_create(&t->thread, NULL, clock_thread_fn, t)) {
                        failed++;
                        nr_cpus = i;
                        break;
                }
        }
        for (i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &cthreads[i];

                pthread_mutex_lock(&t->started);
        }

        for (i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &cthreads[i];

                pthread_mutex_unlock(&t->lock);
        }
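        /*
         * Every worker blocks on its t->lock, which the main thread
         * acquired before pthread_create(). Dropping all the locks only
         * after every thread has been created releases the samplers close
         * together, so the per-CPU sampling loops overlap in time instead
         * of running one after another.
         */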
        for (i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &cthreads[i];
                void *ret;

                pthread_join(t->thread, &ret);
                if (ret)
                        failed++;
        }
        free(cthreads);

        if (failed) {
                log_err("Clocksource test: %lu threads failed\n", failed);
                goto err;
        }
        qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);

        /* silence silly gcc */
        prev = NULL;
        for (failed = i = 0; i < tentries; i++) {
                this = &entries[i];

                if (!i) {
                        prev = this;
                        continue;
                }

                if (prev->tsc > this->tsc) {
                        uint64_t diff = prev->tsc - this->tsc;

                        if (!debug) {
                                failed++;
                                break;
                        }

                        log_info("cs: CPU clock mismatch (diff=%llu):\n",
                                                (unsigned long long) diff);
                        log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", prev->cpu,
                                        (unsigned long long) prev->tsc,
                                        prev->seq);
                        log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", this->cpu,
                                        (unsigned long long) this->tsc,
                                        this->seq);
                        failed++;
                }

                prev = this;
        }

        if (debug) {
                if (failed)
                        log_info("cs: Failed: %lu\n", failed);
                else
                        log_info("cs: Pass!\n");
        }
err:
        free(entries);
        return !!failed;
}
#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) */

int fio_monotonic_clocktest(int debug)
{
        if (debug)
                log_info("cs: current platform does not support CPU clocks\n");
        return 1;
}

#endif