#if defined(ARCH_HAVE_CPU_CLOCK)
#ifndef ARCH_CPU_CLOCK_CYCLES_PER_USEC
static unsigned long cycles_per_usec;
static unsigned long long cycles_start;
static unsigned long long clock_mult;
static unsigned long long max_cycles_mask;
static unsigned long long nsecs_for_max_cycles;
static unsigned int clock_shift;
static unsigned int max_cycles_shift;
#define MAX_CLOCK_SEC (60 * 60)
#endif
#ifdef ARCH_CPU_CLOCK_WRAPS
static unsigned int cycles_wrap;
#endif
#endif

struct tv_valid {
        int last_tv_valid;
        int warned;
};
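/*
 * With clocksource=cpu, cycle counts are converted to nanoseconds with a
 * shift/multiply scheme: nsecs = (cycles * clock_mult) >> clock_shift,
 * where clock_mult/clock_shift approximate 1000 / cycles_per_usec. To keep
 * the multiply from overflowing 64 bits, a count is split into whole
 * multiples of 2^max_cycles_shift cycles (worth nsecs_for_max_cycles each)
 * plus a remainder extracted with max_cycles_mask. See
 * calibrate_cpu_clock() below for how these values are derived.
 */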
#ifdef ARCH_HAVE_CPU_CLOCK
#ifdef CONFIG_TLS_THREAD
static __thread struct tv_valid static_tv_valid;
#else
static pthread_key_t tv_tls_key;
#endif
#endif

enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
static enum fio_cs fio_clock_source_inited = CS_INVAL;
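/*
 * fio_clock_source above selects between CS_GTOD (gettimeofday()),
 * CS_CGETTIME (clock_gettime()) and CS_CPUCLOCK (raw CPU cycle counter),
 * as compiled in. It defaults to the platform's preferred source and can
 * be overridden on the command line, in which case fio_clock_source_set
 * is non-zero.
 */
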
#ifdef FIO_DEBUG_TIME

#define HASH_BITS       8
#define HASH_SIZE       (1 << HASH_BITS)

static struct flist_head hash[HASH_SIZE];
static int gtod_inited;

struct gtod_log {
        struct flist_head list;
        void *caller;
        unsigned long calls;
};

static struct gtod_log *find_hash(void *caller)
{
        unsigned long h = hash_ptr(caller, HASH_BITS);
        struct flist_head *entry;

        flist_for_each(entry, &hash[h]) {
                struct gtod_log *log = flist_entry(entry, struct gtod_log,
                                                                list);

                if (log->caller == caller)
                        return log;
        }

        return NULL;
}

static void inc_caller(void *caller)
{
        struct gtod_log *log = find_hash(caller);

        if (!log) {
                unsigned long h;

                log = malloc(sizeof(*log));
                INIT_FLIST_HEAD(&log->list);
                log->caller = caller;
                log->calls = 0;

                h = hash_ptr(caller, HASH_BITS);
                flist_add_tail(&log->list, &hash[h]);
        }

        log->calls++;
}

static void gtod_log_caller(void *caller)
{
        if (gtod_inited)
                inc_caller(caller);
}

static void fio_exit fio_dump_gtod(void)
{
        unsigned long total_calls = 0;
        int i;

        for (i = 0; i < HASH_SIZE; i++) {
                struct flist_head *entry;
                struct gtod_log *log;

                flist_for_each(entry, &hash[i]) {
                        log = flist_entry(entry, struct gtod_log, list);

                        printf("function %p, calls %lu\n", log->caller,
                                                        log->calls);
                        total_calls += log->calls;
                }
        }

        printf("Total %lu gettimeofday\n", total_calls);
}

static void fio_init gtod_init(void)
{
        int i;

        for (i = 0; i < HASH_SIZE; i++)
                INIT_FLIST_HEAD(&hash[i]);

        gtod_inited = 1;
}

#endif /* FIO_DEBUG_TIME */

#ifdef CONFIG_CLOCK_GETTIME
static int fill_clock_gettime(struct timespec *ts)
{
#if defined(CONFIG_CLOCK_MONOTONIC_RAW)
        return clock_gettime(CLOCK_MONOTONIC_RAW, ts);
#elif defined(CONFIG_CLOCK_MONOTONIC)
        return clock_gettime(CLOCK_MONOTONIC, ts);
#else
        return clock_gettime(CLOCK_REALTIME, ts);
#endif
}
#endif
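/*
 * Preference order: CLOCK_MONOTONIC_RAW is immune to NTP/adjtime frequency
 * adjustments, CLOCK_MONOTONIC is monotonic but may be slewed, and
 * CLOCK_REALTIME is the wall clock fallback, which can jump backwards.
 */
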
static void __fio_gettime(struct timespec *tp)
{
        switch (fio_clock_source) {
#ifdef CONFIG_GETTIMEOFDAY
        case CS_GTOD: {
                struct timeval tv;

                gettimeofday(&tv, NULL);

                tp->tv_sec = tv.tv_sec;
                tp->tv_nsec = tv.tv_usec * 1000;
                break;
                }
#endif
#ifdef CONFIG_CLOCK_GETTIME
        case CS_CGETTIME: {
                if (fill_clock_gettime(tp) < 0) {
                        log_err("fio: clock_gettime fails\n");
                        assert(0);
                }
                break;
                }
#endif
#ifdef ARCH_HAVE_CPU_CLOCK
        case CS_CPUCLOCK: {
                uint64_t nsecs, t, multiples;
                struct tv_valid *tv;

#ifdef CONFIG_TLS_THREAD
                tv = &static_tv_valid;
#else
                tv = pthread_getspecific(tv_tls_key);
#endif

                t = get_cpu_clock();
#ifdef ARCH_CPU_CLOCK_WRAPS
                if (t < cycles_start && !cycles_wrap)
                        cycles_wrap = 1;
                else if (cycles_wrap && t >= cycles_start && !tv->warned) {
                        log_err("fio: double CPU clock wrap\n");
                        tv->warned = 1;
                }

                t -= cycles_start;
#endif
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
                nsecs = t / ARCH_CPU_CLOCK_CYCLES_PER_USEC * 1000;
#else
                multiples = t >> max_cycles_shift;
                nsecs = multiples * nsecs_for_max_cycles;
                nsecs += ((t & max_cycles_mask) * clock_mult) >> clock_shift;
#endif
                tv->last_tv_valid = 1;

                tp->tv_sec = nsecs / 1000000000ULL;
                tp->tv_nsec = nsecs % 1000000000ULL;
                break;
                }
#endif
        default:
                log_err("fio: invalid clock source %d\n", fio_clock_source);
                break;
        }
}

#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timespec *tp, void *caller)
#else
void fio_gettime(struct timespec *tp, void fio_unused *caller)
#endif
{
#ifdef FIO_DEBUG_TIME
        if (!caller)
                caller = __builtin_return_address(0);

        gtod_log_caller(caller);
#endif
        if (fio_unlikely(fio_gettime_offload(tp)))
                return;

        __fio_gettime(tp);
}

#if defined(ARCH_HAVE_CPU_CLOCK) && !defined(ARCH_CPU_CLOCK_CYCLES_PER_USEC)
static unsigned long get_cycles_per_usec(void)
{
        struct timespec s, e;
        uint64_t c_s, c_e;
        enum fio_cs old_cs = fio_clock_source;
        uint64_t elapsed;

#ifdef CONFIG_CLOCK_GETTIME
        fio_clock_source = CS_CGETTIME;
#else
        fio_clock_source = CS_GTOD;
#endif
        __fio_gettime(&s);

        c_s = get_cpu_clock();
        do {
                __fio_gettime(&e);

                elapsed = utime_since(&s, &e);
                if (elapsed >= 1280) {
                        c_e = get_cpu_clock();
                        break;
                }
        } while (1);

        fio_clock_source = old_cs;
        return (c_e - c_s) / elapsed;
}
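/*
 * Example: sampling stops once at least 1280 usecs have passed on the
 * reference clock. On a hypothetical 3 GHz TSC, roughly 3,840,000 cycles
 * elapse in that window, so the function returns about
 * 3840000 / 1280 = 3000 cycles per usec.
 */
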
#define NR_TIME_ITERS   50

static int calibrate_cpu_clock(void)
{
        double delta, mean, S;
        uint64_t minc, maxc, avg, cycles[NR_TIME_ITERS];
        int i, samples, sft = 0;
        unsigned long long tmp, max_ticks, max_mult;

        cycles[0] = get_cycles_per_usec();
        S = delta = mean = 0.0;
        for (i = 0; i < NR_TIME_ITERS; i++) {
                cycles[i] = get_cycles_per_usec();
                delta = cycles[i] - mean;
                if (delta) {
                        mean += delta / (i + 1.0);
                        S += delta * (cycles[i] - mean);
                }
        }

        /*
         * The most common platform clock breakage is returning zero
         * indefinitely. Check for that and return failure.
         */
        if (!cycles[0] && !cycles[NR_TIME_ITERS - 1])
                return 1;

        S = sqrt(S / (NR_TIME_ITERS - 1.0));

        minc = -1ULL;
        maxc = samples = avg = 0;
        for (i = 0; i < NR_TIME_ITERS; i++) {
                double this = cycles[i];

                minc = min(cycles[i], minc);
                maxc = max(cycles[i], maxc);

                if ((fmax(this, mean) - fmin(this, mean)) > S)
                        continue;
                samples++;
                avg += this;
        }

        S /= (double) NR_TIME_ITERS;

        for (i = 0; i < NR_TIME_ITERS; i++)
                dprint(FD_TIME, "cycles[%d]=%llu\n", i,
                                        (unsigned long long) cycles[i]);

        avg /= samples;
        cycles_per_usec = avg;
        dprint(FD_TIME, "avg: %llu\n", (unsigned long long) avg);
        dprint(FD_TIME, "min=%llu, max=%llu, mean=%f, S=%f\n",
                        (unsigned long long) minc,
                        (unsigned long long) maxc, mean, S);
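        /*
         * The sampling loop above is Welford's online algorithm: mean and S
         * accumulate the running mean and sum of squared deviations, so
         * sqrt(S / (N - 1)) is the sample standard deviation. Samples more
         * than one deviation away from the mean are then discarded as
         * outliers before averaging.
         */
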
        max_ticks = MAX_CLOCK_SEC * cycles_per_usec * 1000000ULL;
        max_mult = ULLONG_MAX / max_ticks;
        dprint(FD_TIME, "\n\nmax_ticks=%llu, __builtin_clzll=%d, max_mult=%llu\n",
                        max_ticks, __builtin_clzll(max_ticks), max_mult);

        /*
         * Find the largest shift count that will produce
         * a multiplier that does not exceed max_mult
         */
        tmp = max_mult * cycles_per_usec / 1000;
        while (tmp > 1) {
                tmp >>= 1;
                sft++;
                dprint(FD_TIME, "tmp=%llu, sft=%u\n", tmp, sft);
        }

        clock_shift = sft;
        clock_mult = (1ULL << sft) * 1000 / cycles_per_usec;
        dprint(FD_TIME, "clock_shift=%u, clock_mult=%llu\n", clock_shift,
                                                        clock_mult);
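        /*
         * Worked example (hypothetical 3 GHz TSC, cycles_per_usec = 3000):
         * max_ticks = 3600 * 3000 * 1000000 ~= 1.08e13 and
         * max_mult ~= 1707939, so the loop settles on sft = 22, giving
         * clock_mult = (1 << 22) * 1000 / 3000 = 1398101. A cycle count t
         * then converts as (t * 1398101) >> 22 ~= t / 3 nsecs, and the
         * product stays below 2^64 for any t up to max_ticks.
         */
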
        // Find the greatest power of 2 clock ticks that is less than the
        // ticks in MAX_CLOCK_SEC
        max_cycles_shift = max_cycles_mask = 0;
        tmp = MAX_CLOCK_SEC * 1000000ULL * cycles_per_usec;
        dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp,
                                                        max_cycles_shift);
        while (tmp > 1) {
                tmp >>= 1;
                max_cycles_shift++;
                dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp,
                                                        max_cycles_shift);
        }

        // If we used (1ULL << max_cycles_shift) * 1000 / cycles_per_usec
        // here, we would have a discontinuity every
        // (1ULL << max_cycles_shift) cycles
        nsecs_for_max_cycles = ((1ULL << max_cycles_shift) * clock_mult)
                                                        >> clock_shift;

        // Use a bitmask to calculate ticks % (1ULL << max_cycles_shift)
        for (tmp = 0; tmp < max_cycles_shift; tmp++)
                max_cycles_mask |= 1ULL << tmp;
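        /*
         * The loop above builds (1ULL << max_cycles_shift) - 1, i.e. the
         * low max_cycles_shift bits all set, so t & max_cycles_mask is the
         * cheap equivalent of t % (1ULL << max_cycles_shift). Computing
         * nsecs_for_max_cycles with the same clock_mult/clock_shift pair
         * keeps the two conversion stages consistent at every wrap
         * boundary.
         */
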
        dprint(FD_TIME, "max_cycles_shift=%u, 2^max_cycles_shift=%llu, "
                        "nsecs_for_max_cycles=%llu, max_cycles_mask=%016llx\n",
                        max_cycles_shift, (1ULL << max_cycles_shift),
                        nsecs_for_max_cycles, max_cycles_mask);

        cycles_start = get_cpu_clock();
        dprint(FD_TIME, "cycles_start=%llu\n", cycles_start);
        return 0;
}
#else
static int calibrate_cpu_clock(void)
{
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
        return 0;
#else
        return 1;
#endif
}
#endif // ARCH_HAVE_CPU_CLOCK

#ifndef CONFIG_TLS_THREAD
void fio_local_clock_init(int is_thread)
{
        struct tv_valid *t;

        t = calloc(1, sizeof(*t));
        if (pthread_setspecific(tv_tls_key, t)) {
                log_err("fio: can't set TLS key\n");
                assert(0);
        }
}

static void kill_tv_tls_key(void *data)
{
        free(data);
}
#else
void fio_local_clock_init(int is_thread)
{
}
#endif

void fio_clock_init(void)
{
        if (fio_clock_source == fio_clock_source_inited)
                return;

#ifndef CONFIG_TLS_THREAD
        if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
                log_err("fio: can't create TLS key\n");
#endif

        fio_clock_source_inited = fio_clock_source;

        if (calibrate_cpu_clock())
                tsc_reliable = 0;

        /*
         * If the arch sets tsc_reliable != 0, then it must be good enough
         * to use as THE clock source. For x86 CPUs, this means the TSC
         * runs at a constant rate and is synced across CPU cores.
         */
        if (tsc_reliable) {
                if (!fio_clock_source_set && !fio_monotonic_clocktest(0))
                        fio_clock_source = CS_CPUCLOCK;
        } else if (fio_clock_source == CS_CPUCLOCK)
                log_info("fio: clocksource=cpu may not be reliable\n");
}

uint64_t ntime_since(const struct timespec *s, const struct timespec *e)
{
        int64_t sec, nsec;

        sec = e->tv_sec - s->tv_sec;
        nsec = e->tv_nsec - s->tv_nsec;
        if (sec > 0 && nsec < 0) {
                sec--;
                nsec += 1000000000LL;
        }

        /*
         * time warp bug on some kernels?
         */
        if (sec < 0 || (sec == 0 && nsec < 0))
                return 0;

        return nsec + (sec * 1000000000LL);
}
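/*
 * Typical usage, e.g. timing a region at nanosecond granularity:
 *
 *      struct timespec begin, end;
 *
 *      fio_gettime(&begin, NULL);
 *      do_something();
 *      fio_gettime(&end, NULL);
 *      elapsed_ns = ntime_since(&begin, &end);
 *
 * (do_something() is a placeholder, not a fio function.)
 */
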
uint64_t utime_since(const struct timespec *s, const struct timespec *e)
{
        int64_t sec, usec;

        sec = e->tv_sec - s->tv_sec;
        usec = (e->tv_nsec - s->tv_nsec) / 1000;
        if (sec > 0 && usec < 0) {
                sec--;
                usec += 1000000;
        }

        /*
         * time warp bug on some kernels?
         */
        if (sec < 0 || (sec == 0 && usec < 0))
                return 0;

        return usec + (sec * 1000000);
}

uint64_t utime_since_now(const struct timespec *s)
{
        struct timespec t;
#ifdef FIO_DEBUG_TIME
        void *p = __builtin_return_address(0);

        fio_gettime(&t, p);
#else
        fio_gettime(&t, NULL);
#endif

        return utime_since(s, &t);
}

uint64_t mtime_since_tv(const struct timeval *s, const struct timeval *e)
{
        int64_t sec, usec;

        sec = e->tv_sec - s->tv_sec;
        usec = (e->tv_usec - s->tv_usec);
        if (sec > 0 && usec < 0) {
                sec--;
                usec += 1000000;
        }

        if (sec < 0 || (sec == 0 && usec < 0))
                return 0;

        sec *= 1000;
        usec /= 1000;
        return sec + usec;
}

uint64_t mtime_since_now(const struct timespec *s)
{
        struct timespec t;
#ifdef FIO_DEBUG_TIME
        void *p = __builtin_return_address(0);

        fio_gettime(&t, p);
#else
        fio_gettime(&t, NULL);
#endif

        return mtime_since(s, &t);
}

uint64_t mtime_since(const struct timespec *s, const struct timespec *e)
{
        int64_t sec, usec;

        sec = e->tv_sec - s->tv_sec;
        usec = (e->tv_nsec - s->tv_nsec) / 1000;
        if (sec > 0 && usec < 0) {
                sec--;
                usec += 1000000;
        }

        if (sec < 0 || (sec == 0 && usec < 0))
                return 0;

        sec *= 1000;
        usec /= 1000;
        return sec + usec;
}

uint64_t time_since_now(const struct timespec *s)
{
        return mtime_since_now(s) / 1000;
}

#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
    defined(CONFIG_SFAA)

#define CLOCK_ENTRIES_DEBUG     100000
#define CLOCK_ENTRIES_TEST      10000

struct clock_entry {
        uint32_t seq;
        uint32_t cpu;
        uint64_t tsc;
};

struct clock_thread {
        pthread_t thread;
        int cpu;
        int debug;
        pthread_mutex_t lock;
        pthread_mutex_t started;
        unsigned long nr_entries;
        uint32_t *seq;
        struct clock_entry *entries;
};

static inline uint32_t atomic32_inc_return(uint32_t *seq)
{
        return 1 + __sync_fetch_and_add(seq, 1);
}
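/*
 * __sync_fetch_and_add() atomically increments *seq and returns the value
 * it held beforehand, so adding 1 yields the post-increment value. This
 * GCC builtin is what the CONFIG_SFAA build guard above tests for.
 */
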
static void *clock_thread_fn(void *data)
{
        struct clock_thread *t = data;
        struct clock_entry *c;
        os_cpu_mask_t cpu_mask;
        unsigned long long first;
        int i;

        if (fio_cpuset_init(&cpu_mask)) {
                int __err = errno;

                log_err("clock cpuset init failed: %s\n", strerror(__err));
                goto err_out;
        }

        fio_cpu_set(&cpu_mask, t->cpu);

        if (fio_setaffinity(gettid(), cpu_mask) == -1) {
                int __err = errno;

                log_err("clock setaffinity failed: %s\n", strerror(__err));
                goto err;
        }

        pthread_mutex_lock(&t->lock);
        pthread_mutex_unlock(&t->started);

        first = get_cpu_clock();
        c = &t->entries[0];
        for (i = 0; i < t->nr_entries; i++, c++) {
                uint32_t seq;
                uint64_t tsc;

                c->cpu = t->cpu;
                /*
                 * Pair a sequence number with a clock sample: retry if
                 * another thread bumped the shared sequence while we were
                 * reading the clock.
                 */
                do {
                        seq = atomic32_inc_return(t->seq);
                        tsc = get_cpu_clock();
                } while (seq != *t->seq);

                c->seq = seq;
                c->tsc = tsc;
        }

        if (t->debug) {
                unsigned long long clocks;

                clocks = t->entries[i - 1].tsc - t->entries[0].tsc;
                log_info("cs: cpu%3d: %llu clocks seen, first %llu\n", t->cpu,
                                                        clocks, first);
        }

        /*
         * The most common platform clock breakage is returning zero
         * indefinitely. Check for that and return failure.
         */
        if (!t->entries[i - 1].tsc && !t->entries[0].tsc)
                goto err;

        fio_cpuset_exit(&cpu_mask);
        return NULL;
err:
        fio_cpuset_exit(&cpu_mask);
err_out:
        return (void *) 1;
}

static int clock_cmp(const void *p1, const void *p2)
{
        const struct clock_entry *c1 = p1;
        const struct clock_entry *c2 = p2;

        if (c1->seq == c2->seq)
                log_err("cs: bug in atomic sequence!\n");

        return c1->seq - c2->seq;
}
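/*
 * Test idea: one thread per CPU, each pinned to its CPU, all tagging clock
 * samples with values from a single shared atomic sequence counter.
 * Sorting every entry by sequence number reconstructs the global sampling
 * order; if the CPU clocks are synchronized and monotonic, the TSC values
 * must be non-decreasing in that order, so any backwards step is a
 * mismatch.
 */
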
int fio_monotonic_clocktest(int debug)
{
        struct clock_thread *cthreads;
        unsigned int nr_cpus = cpus_online();
        struct clock_entry *entries;
        unsigned long nr_entries, tentries, failed = 0;
        struct clock_entry *prev, *this;
        uint32_t seq = 0;
        unsigned int i;

        if (debug) {
                log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");

#ifdef FIO_INC_DEBUG
                fio_debug |= 1U << FD_TIME;
#endif
                nr_entries = CLOCK_ENTRIES_DEBUG;
        } else
                nr_entries = CLOCK_ENTRIES_TEST;

        calibrate_cpu_clock();

        if (debug) {
#ifdef FIO_INC_DEBUG
                fio_debug &= ~(1U << FD_TIME);
#endif
        }

        cthreads = malloc(nr_cpus * sizeof(struct clock_thread));
        tentries = nr_entries * nr_cpus;
        entries = malloc(tentries * sizeof(struct clock_entry));

        if (debug)
                log_info("cs: Testing %u CPUs\n", nr_cpus);

        for (i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &cthreads[i];

                t->cpu = i;
                t->debug = debug;
                t->seq = &seq;
                t->nr_entries = nr_entries;
                t->entries = &entries[i * nr_entries];
                pthread_mutex_init(&t->lock, NULL);
                pthread_mutex_init(&t->started, NULL);
                pthread_mutex_lock(&t->lock);
                if (pthread_create(&t->thread, NULL, clock_thread_fn, t)) {
                        failed++;
                        nr_cpus = i;
                        break;
                }
        }

        for (i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &cthreads[i];

                pthread_mutex_lock(&t->started);
        }

        for (i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &cthreads[i];

                pthread_mutex_unlock(&t->lock);
        }

        for (i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &cthreads[i];
                void *ret;

                pthread_join(t->thread, &ret);
                if (ret)
                        failed++;
        }
        free(cthreads);

        if (failed) {
                if (debug)
                        log_err("Clocksource test: %lu threads failed\n",
                                                                failed);
                goto err;
        }

        qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);

        /* silence silly gcc */
        prev = NULL;

        for (failed = i = 0; i < tentries; i++) {
                this = &entries[i];

                if (!i) {
                        prev = this;
                        continue;
                }

                if (prev->tsc > this->tsc) {
                        uint64_t diff = prev->tsc - this->tsc;

                        if (!debug) {
                                failed++;
                                break;
                        }

                        log_info("cs: CPU clock mismatch (diff=%llu):\n",
                                                (unsigned long long) diff);
                        log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", prev->cpu,
                                        (unsigned long long) prev->tsc,
                                        prev->seq);
                        log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", this->cpu,
                                        (unsigned long long) this->tsc,
                                        this->seq);
                        failed++;
                }

                prev = this;
        }

        if (debug) {
                if (failed)
                        log_info("cs: Failed: %lu\n", failed);
                else
                        log_info("cs: Pass!\n");
        }
err:
        free(entries);
        return !!failed;
}

#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) */

int fio_monotonic_clocktest(int debug)
{
        if (debug)
                log_info("cs: current platform does not support CPU clocks\n");
        return 1;
}

#endif