+ fio_clock_source = old_cs;
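+ /*
+ * Divide the elapsed cycles by 128; the +127 makes the shift a
+ * ceiling divide. Judging by the /10 conversions in
+ * calibrate_cpu_clock() below, the timing window appears to be
+ * ~1280 usec, so the result is cycles per ~10 usec.
+ */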
+ return (c_e - c_s + 127) >> 7;
+}
+
+#define NR_TIME_ITERS 50
+
+static int calibrate_cpu_clock(void)
+{
+ double delta, mean, S;
+ uint64_t avg, cycles[NR_TIME_ITERS];
+ int i, samples;
+
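+ /*
+ * Prime the timing path once; the loop below overwrites cycles[0]
+ * with a fresh sample anyway.
+ */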
+ cycles[0] = get_cycles_per_usec();
+ S = delta = mean = 0.0;
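+ /*
+ * Welford's online algorithm: 'mean' tracks the running mean and
+ * 'S' accumulates the sum of squared deviations.
+ */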
+ for (i = 0; i < NR_TIME_ITERS; i++) {
+ cycles[i] = get_cycles_per_usec();
+ delta = cycles[i] - mean;
+ if (delta) {
+ mean += delta / (i + 1.0);
+ S += delta * (cycles[i] - mean);
+ }
+ }
+
+ /*
+ * The most common platform clock breakage is returning zero
+ * indefinitely. Check for that and return failure.
+ */
+ if (!cycles[0] && !cycles[NR_TIME_ITERS - 1])
+ return 1;
+
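+ /* Bessel-corrected sample standard deviation of the runs. */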
+ S = sqrt(S / (NR_TIME_ITERS - 1.0));
+
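+ /*
+ * Keep only the samples within one standard deviation of the mean
+ * and average those.
+ */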
+ samples = avg = 0;
+ for (i = 0; i < NR_TIME_ITERS; i++) {
+ double this = cycles[i];
+
+ if ((fmax(this, mean) - fmin(this, mean)) > S)
+ continue;
+ samples++;
+ avg += this;
+ }
+
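+ /*
+ * Rescaling for the debug prints below only: mean/10 converts the
+ * per-10-usec samples to cycles/usec, matching the cycles[i]/10
+ * output.
+ */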
+ S /= (double) NR_TIME_ITERS;
+ mean /= 10.0;
+
+ for (i = 0; i < NR_TIME_ITERS; i++)
+ dprint(FD_TIME, "cycles[%d]=%llu\n", i,
+ (unsigned long long) cycles[i] / 10);
+
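+ /*
+ * Average the accepted samples and convert from per-10-usec
+ * granularity to cycles/usec, rounding to nearest via the +5.
+ */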
+ avg /= samples;
+ avg = (avg + 5) / 10;
+ dprint(FD_TIME, "avg: %llu\n", (unsigned long long) avg);
+ dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);
+
+ cycles_per_usec = avg;
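+ /*
+ * 16777216 = 2^24: a fixed-point reciprocal, so converting cycles
+ * to usec becomes a multiply and a divide by 2^24 (a shift) rather
+ * than a divide by a runtime variable.
+ */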
+ inv_cycles_per_usec = 16777216UL / cycles_per_usec;
+ dprint(FD_TIME, "inv_cycles_per_usec=%lu\n", inv_cycles_per_usec);
+ return 0;
+}
+#else
+static int calibrate_cpu_clock(void)
+{
+#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
+ return 0;
+#else
+ return 1;
+#endif
+}
+ #endif /* ARCH_HAVE_CPU_CLOCK */
+
+#ifndef CONFIG_TLS_THREAD
+void fio_local_clock_init(int is_thread)
+{
+ struct tv_valid *t;
+
+ t = calloc(1, sizeof(*t));
+ if (!t)
+ log_err("fio: can't allocate TLS data\n");
+ else if (pthread_setspecific(tv_tls_key, t))
+ log_err("fio: can't set TLS key\n");
+}
+
+static void kill_tv_tls_key(void *data)
+{
+ free(data);
+}
+#else
+void fio_local_clock_init(int is_thread)
+{
+}
+#endif
+
+void fio_clock_init(void)
+{
+ if (fio_clock_source == fio_clock_source_inited)
+ return;
+
+#ifndef CONFIG_TLS_THREAD
+ if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
+ log_err("fio: can't create TLS key\n");
+#endif
+
+ fio_clock_source_inited = fio_clock_source;
+
+ if (calibrate_cpu_clock())
+ tsc_reliable = 0;
+
+ /*
+ * If the arch sets tsc_reliable != 0, then it must be good enough
+ * to use as THE clock source. For x86 CPUs, this means the TSC
+ * runs at a constant rate and is synced across CPU cores.
+ */
+ if (tsc_reliable) {
+ if (!fio_clock_source_set)
+ fio_clock_source = CS_CPUCLOCK;
+ } else if (fio_clock_source == CS_CPUCLOCK)
+ log_info("fio: clocksource=cpu may not be reliable\n");
+}
+
+uint64_t utime_since(const struct timeval *s, const struct timeval *e)
+{
+ long sec, usec;
+ uint64_t ret;
+
+ sec = e->tv_sec - s->tv_sec;
+ usec = e->tv_usec - s->tv_usec;
+ if (sec > 0 && usec < 0) {
+ sec--;
+ usec += 1000000;
+ }
+
+ /*
+ * Guard against time going backwards (a time warp bug seen on
+ * some kernels); treat a negative delta as zero elapsed time.
+ */
+ if (sec < 0 || (sec == 0 && usec < 0))
+ return 0;
+
+ ret = sec * 1000000ULL + usec;
+
+ return ret;
+}
+
+uint64_t utime_since_now(const struct timeval *s)
+{
+ struct timeval t;
+
+ fio_gettime(&t, NULL);
+ return utime_since(s, &t);
+}
+
+uint64_t mtime_since(const struct timeval *s, const struct timeval *e)
+{
+ long sec, usec;
+ uint64_t ret;
+
+ sec = e->tv_sec - s->tv_sec;
+ usec = e->tv_usec - s->tv_usec;
+ if (sec > 0 && usec < 0) {
+ sec--;
+ usec += 1000000;
+ }
+
+ if (sec < 0 || (sec == 0 && usec < 0))
+ return 0;
+
+ /*
+ * Compute in 64 bits: 'long' is 32 bits on some platforms and
+ * would overflow after ~25 days worth of msecs.
+ */
+ ret = sec * 1000ULL + usec / 1000;
+
+ return ret;
+}
+
+uint64_t mtime_since_now(const struct timeval *s)
+{
+ struct timeval t;
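+ /*
+ * Hand our caller's address to fio_gettime(); fio's gettime
+ * debugging can use it to attribute clock readings to call sites.
+ */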
+ void *p = __builtin_return_address(0);
+
+ fio_gettime(&t, p);
+ return mtime_since(s, &t);
+}
+
+uint64_t time_since_now(const struct timeval *s)
+{
+ return mtime_since_now(s) / 1000;
+}
+
+#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
+ defined(CONFIG_SFAA)
+
+#define CLOCK_ENTRIES 100000
+
+struct clock_entry {
+ uint32_t seq;
+ uint32_t cpu;
+ uint64_t tsc;
+};
+
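+ /*
+ * One verification thread per CPU. The starter is expected to hold
+ * 'lock' until all threads exist and to wait on 'started'; see the
+ * handshake at the top of clock_thread_fn().
+ */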
+struct clock_thread {
+ pthread_t thread;
+ int cpu;
+ pthread_mutex_t lock;
+ pthread_mutex_t started;
+ uint32_t *seq;
+ struct clock_entry *entries;
+};
+
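+ /*
+ * Atomically bump *seq and return the post-increment value. The
+ * gcc __sync builtin is what the CONFIG_SFAA guard above checks for.
+ */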
+static inline uint32_t atomic32_inc_return(uint32_t *seq)
+{
+ return 1 + __sync_fetch_and_add(seq, 1);
+}
+
+static void *clock_thread_fn(void *data)
+{
+ struct clock_thread *t = data;
+ struct clock_entry *c;
+ os_cpu_mask_t cpu_mask;
+ uint32_t last_seq;
+ int i;
+
+ memset(&cpu_mask, 0, sizeof(cpu_mask));
+ fio_cpu_set(&cpu_mask, t->cpu);
+
+ if (fio_setaffinity(gettid(), cpu_mask) == -1) {
+ log_err("clock setaffinity failed\n");
+ return (void *) 1;
+ }
+
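+ /*
+ * Handshake with the starter: block until our lock is released,
+ * then signal via 'started' that this thread is up and pinned.
+ */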
+ pthread_mutex_lock(&t->lock);
+ pthread_mutex_unlock(&t->started);
+
+ last_seq = 0;
+ c = &t->entries[0];
+ for (i = 0; i < CLOCK_ENTRIES; i++, c++) {
+ uint32_t seq;
+ uint64_t tsc = 0; /* stays 0 if we bail on sequence wrap */
+
+ c->cpu = t->cpu;
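+ /*
+ * Seqlock-style sample: grab the next global sequence number and
+ * read the TSC, retrying if another thread bumped the sequence
+ * while we were reading. This yields a globally ordered stream of
+ * (seq, tsc) pairs that can later be checked for TSC monotonicity
+ * across CPUs; the seq < last_seq test bails if the 32-bit
+ * sequence counter ever wraps.
+ */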
+ do {
+ seq = atomic32_inc_return(t->seq);
+ if (seq < last_seq)
+ break;
+ tsc = get_cpu_clock();
+ } while (seq != *t->seq);
+
+ c->seq = seq;
+ c->tsc = tsc;
+ last_seq = seq; /* arm the wraparound check above */