+ S /= (double) NR_TIME_ITERS;
+
+ for (i = 0; i < NR_TIME_ITERS; i++)
+ dprint(FD_TIME, "cycles[%d]=%llu\n", i, (unsigned long long) cycles[i]);
+
+ avg /= samples;
+ cycles_per_msec = avg;
+ dprint(FD_TIME, "avg: %llu\n", (unsigned long long) avg);
+ dprint(FD_TIME, "min=%llu, max=%llu, mean=%f, S=%f\n",
+ (unsigned long long) minc,
+ (unsigned long long) maxc, mean, S);
+
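+ /*
+ * max_ticks is the largest cycle count we expect to convert, i.e.
+ * MAX_CLOCK_SEC seconds worth of cycles; max_mult is the largest
+ * multiplier that keeps max_ticks * max_mult within 64 bits.
+ */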
+ max_ticks = MAX_CLOCK_SEC * cycles_per_msec * 1000ULL;
+ max_mult = ULLONG_MAX / max_ticks;
+ dprint(FD_TIME, "\n\nmax_ticks=%llu, __builtin_clzll=%d, "
+ "max_mult=%llu\n", max_ticks,
+ __builtin_clzll(max_ticks), max_mult);
+
+ /*
+ * Find the largest shift count that will produce
+ * a multiplier that does not exceed max_mult
+ */
+ tmp = max_mult * cycles_per_msec / 1000000;
+ while (tmp > 1) {
+ tmp >>= 1;
+ sft++;
+ dprint(FD_TIME, "tmp=%llu, sft=%u\n", tmp, sft);
+ }
+
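+ /*
+ * clock_mult/clock_shift express the cycles -> nsec conversion as a
+ * multiply and shift: (cycles * clock_mult) >> clock_shift is
+ * approximately cycles * 1000000 / cycles_per_msec.
+ */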
+ clock_shift = sft;
+ clock_mult = (1ULL << sft) * 1000000 / cycles_per_msec;
+ dprint(FD_TIME, "clock_shift=%u, clock_mult=%llu\n", clock_shift,
+ clock_mult);
+
+ /*
+ * Find the greatest power of 2 clock ticks that is less than the
+ * ticks in MAX_CLOCK_SEC
+ */
+ max_cycles_shift = max_cycles_mask = 0;
+ tmp = MAX_CLOCK_SEC * 1000ULL * cycles_per_msec;
+ dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp,
+ max_cycles_shift);
+ while (tmp > 1) {
+ tmp >>= 1;
+ max_cycles_shift++;
+ dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp, max_cycles_shift);
+ }
+ /*
+ * if we use (1ULL << max_cycles_shift) * 1000 / cycles_per_msec
+ * here we will have a discontinuity every
+ * (1ULL << max_cycles_shift) cycles
+ */
+ nsecs_for_max_cycles = ((1ULL << max_cycles_shift) * clock_mult)
+ >> clock_shift;
+
+ /* Use a bitmask to calculate ticks % (1ULL << max_cycles_shift) */
+ for (tmp = 0; tmp < max_cycles_shift; tmp++)
+ max_cycles_mask |= 1ULL << tmp;
+
+ dprint(FD_TIME, "max_cycles_shift=%u, 2^max_cycles_shift=%llu, "
+ "nsecs_for_max_cycles=%llu, "
+ "max_cycles_mask=%016llx\n",
+ max_cycles_shift, (1ULL << max_cycles_shift),
+ nsecs_for_max_cycles, max_cycles_mask);
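+
+ /*
+ * These values allow a two-stage conversion for large cycle counts:
+ * (cycles >> max_cycles_shift) whole blocks of nsecs_for_max_cycles
+ * nsecs each, plus the remainder (cycles & max_cycles_mask) converted
+ * with clock_mult/clock_shift, keeping the (cycles * clock_mult)
+ * product within 64 bits even for long runs.
+ */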
+
+ cycles_start = get_cpu_clock();
+ dprint(FD_TIME, "cycles_start=%llu\n", cycles_start);
+ return 0;
+}
+#else
+static int calibrate_cpu_clock(void)
+{
+#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
+ return 0;
+#else
+ return 1;
+#endif
+}
+#endif // ARCH_HAVE_CPU_CLOCK
+
+#ifndef CONFIG_TLS_THREAD
+void fio_local_clock_init(int is_thread)
+{
+ struct tv_valid *t;
+
+ t = calloc(1, sizeof(*t));
+ if (pthread_setspecific(tv_tls_key, t)) {
+ log_err("fio: can't set TLS key\n");
+ assert(0);
+ }
+}
+
+static void kill_tv_tls_key(void *data)
+{
+ free(data);
+}
+#else
+void fio_local_clock_init(int is_thread)
+{
+}
+#endif
+
+void fio_clock_init(void)
+{
+ if (fio_clock_source == fio_clock_source_inited)
+ return;
+
+#ifndef CONFIG_TLS_THREAD
+ if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
+ log_err("fio: can't create TLS key\n");
+#endif
+
+ fio_clock_source_inited = fio_clock_source;
+
+ if (calibrate_cpu_clock())
+ tsc_reliable = false;
+
+ /*
+ * If the arch sets tsc_reliable != 0, then it must be good enough
+ * to use as THE clock source. For x86 CPUs, this means the TSC
+ * runs at a constant rate and is synced across CPU cores.
+ */
+ if (tsc_reliable) {
+ if (!fio_clock_source_set && !fio_monotonic_clocktest(0))
+ fio_clock_source = CS_CPUCLOCK;
+ } else if (fio_clock_source == CS_CPUCLOCK)
+ log_info("fio: clocksource=cpu may not be reliable\n");
+ dprint(FD_TIME, "gettime: clocksource=%d\n", (int) fio_clock_source);
+}
+
+uint64_t ntime_since(const struct timespec *s, const struct timespec *e)
+{
+ int64_t sec, nsec;
+
+ sec = e->tv_sec - s->tv_sec;
+ nsec = e->tv_nsec - s->tv_nsec;
+ if (sec > 0 && nsec < 0) {
+ sec--;
+ nsec += 1000000000LL;
+ }
+
+ /*
+ * time warp bug on some kernels?
+ */
+ if (sec < 0 || (sec == 0 && nsec < 0))
+ return 0;
+
+ return nsec + (sec * 1000000000LL);
+}
+
+uint64_t ntime_since_now(const struct timespec *s)
+{
+ struct timespec now;
+
+ fio_gettime(&now, NULL);
+ return ntime_since(s, &now);
+}
+
+uint64_t utime_since(const struct timespec *s, const struct timespec *e)
+{
+ int64_t sec, usec;
+
+ sec = e->tv_sec - s->tv_sec;
+ usec = (e->tv_nsec - s->tv_nsec) / 1000;
+ if (sec > 0 && usec < 0) {
+ sec--;
+ usec += 1000000;
+ }
+
+ /*
+ * time warp bug on some kernels?
+ */
+ if (sec < 0 || (sec == 0 && usec < 0))
+ return 0;
+
+ return usec + (sec * 1000000);
+}
+
+uint64_t utime_since_now(const struct timespec *s)
+{
+ struct timespec t;
+#ifdef FIO_DEBUG_TIME
+ void *p = __builtin_return_address(0);
+
+ fio_gettime(&t, p);
+#else
+ fio_gettime(&t, NULL);
+#endif
+
+ return utime_since(s, &t);
+}
+
+uint64_t mtime_since_tv(const struct timeval *s, const struct timeval *e)
+{
+ int64_t sec, usec;
+
+ sec = e->tv_sec - s->tv_sec;
+ usec = (e->tv_usec - s->tv_usec);
+ if (sec > 0 && usec < 0) {
+ sec--;
+ usec += 1000000;
+ }
+
+ if (sec < 0 || (sec == 0 && usec < 0))
+ return 0;
+
+ sec *= 1000;
+ usec /= 1000;
+ return sec + usec;
+}
+
+uint64_t mtime_since_now(const struct timespec *s)
+{
+ struct timespec t;
+#ifdef FIO_DEBUG_TIME
+ void *p = __builtin_return_address(0);
+
+ fio_gettime(&t, p);
+#else
+ fio_gettime(&t, NULL);
+#endif
+
+ return mtime_since(s, &t);
+}
+
+uint64_t mtime_since(const struct timespec *s, const struct timespec *e)
+{
+ int64_t sec, usec;
+
+ sec = e->tv_sec - s->tv_sec;
+ usec = (e->tv_nsec - s->tv_nsec) / 1000;
+ if (sec > 0 && usec < 0) {
+ sec--;
+ usec += 1000000;
+ }
+
+ if (sec < 0 || (sec == 0 && usec < 0))
+ return 0;
+
+ sec *= 1000;
+ usec /= 1000;
+ return sec + usec;
+}
+
+uint64_t time_since_now(const struct timespec *s)
+{
+ return mtime_since_now(s) / 1000;
+}
+
+#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
+ defined(CONFIG_SYNC_SYNC) && defined(CONFIG_CMP_SWAP)
+
+#define CLOCK_ENTRIES_DEBUG 100000
+#define CLOCK_ENTRIES_TEST 1000
+
+struct clock_entry {
+ uint32_t seq;
+ uint32_t cpu;
+ uint64_t tsc;
+};
+
+struct clock_thread {
+ pthread_t thread;
+ int cpu;
+ int debug;
+ pthread_mutex_t lock;
+ pthread_mutex_t started;
+ unsigned long nr_entries;
+ uint32_t *seq;
+ struct clock_entry *entries;
+};
+
+static inline uint32_t atomic32_compare_and_swap(uint32_t *ptr, uint32_t old,
+ uint32_t new)
+{
+ return __sync_val_compare_and_swap(ptr, old, new);
+}
+
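+/*
+ * Worker for the clock monotonicity test: pins itself to t->cpu and then
+ * records t->nr_entries (seq, cpu, tsc) samples, ordered across all
+ * threads by the shared sequence counter t->seq.
+ */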
+static void *clock_thread_fn(void *data)
+{
+ struct clock_thread *t = data;
+ struct clock_entry *c;
+ os_cpu_mask_t cpu_mask;
+ unsigned long long first;
+ int i;
+
+ if (fio_cpuset_init(&cpu_mask)) {
+ int __err = errno;
+
+ log_err("clock cpuset init failed: %s\n", strerror(__err));
+ goto err_out;
+ }
+
+ fio_cpu_set(&cpu_mask, t->cpu);
+
+ if (fio_setaffinity(gettid(), cpu_mask) == -1) {
+ int __err = errno;
+
+ log_err("clock setaffinity failed: %s\n", strerror(__err));
+ goto err;
+ }
+
+ pthread_mutex_lock(&t->lock);
+ pthread_mutex_unlock(&t->started);
+
+ first = get_cpu_clock();
+ c = &t->entries[0];
+ for (i = 0; i < t->nr_entries; i++, c++) {
+ uint32_t seq;
+ uint64_t tsc;
+
+ c->cpu = t->cpu;
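+ /*
+ * Read the shared sequence number, sample the TSC, then try to
+ * advance the sequence with a compare-and-swap. If another CPU got
+ * there first, resample, so each stored (seq, tsc) pair matches the
+ * order in which the sequence was claimed.
+ */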
+ do {
+ seq = *t->seq;
+ if (seq == UINT_MAX)
+ break;
+ __sync_synchronize();
+ tsc = get_cpu_clock();
+ } while (seq != atomic32_compare_and_swap(t->seq, seq, seq + 1));
+
+ if (seq == UINT_MAX)
+ break;
+
+ c->seq = seq;
+ c->tsc = tsc;
+ }
+
+ if (t->debug) {
+ unsigned long long clocks;