#include "smalloc.h"
#include "hash.h"
+#include "os/os.h"
#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned long cycles_per_usec;
-static unsigned long last_cycles;
+static unsigned long inv_cycles_per_usec;
+#endif
int tsc_reliable = 0;
+
+struct tv_valid {
+ struct timeval last_tv;
+ uint64_t last_cycles;
+ int last_tv_valid;
+};
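+
+/*
+ * Per-thread clock state: keeping last_tv/last_cycles per thread means
+ * the backwards-drift clamp in fio_gettime() never compares TSC values
+ * taken on different CPUs. Storage is compiler TLS (__thread) when
+ * CONFIG_TLS_THREAD is set, otherwise a pthread key with one
+ * heap-allocated tv_valid per thread (see fio_local_clock_init()).
+ */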
+#ifdef CONFIG_TLS_THREAD
+static __thread struct tv_valid static_tv_valid;
+#else
+static pthread_key_t tv_tls_key;
#endif
-static struct timeval last_tv;
-static int last_tv_valid;
enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
+enum fio_cs fio_clock_source_inited = CS_INVAL;
#ifdef FIO_DEBUG_TIME
#endif /* FIO_DEBUG_TIME */
-#ifdef FIO_DEBUG_TIME
-void fio_gettime(struct timeval *tp, void *caller)
+#ifdef CONFIG_CLOCK_GETTIME
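+/*
+ * Prefer CLOCK_MONOTONIC where available: unlike CLOCK_REALTIME it
+ * cannot be stepped by settimeofday()/NTP, so a wall clock adjustment
+ * mid-run does not show up as a jump between two samples.
+ */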
+static int fill_clock_gettime(struct timespec *ts)
+{
+#ifdef CONFIG_CLOCK_MONOTONIC
+ return clock_gettime(CLOCK_MONOTONIC, ts);
#else
-void fio_gettime(struct timeval *tp, void fio_unused *caller)
+ return clock_gettime(CLOCK_REALTIME, ts);
+#endif
+}
#endif
+
+static struct tv_valid *__fio_gettime(struct timeval *tp)
{
-#ifdef FIO_DEBUG_TIME
- if (!caller)
- caller = __builtin_return_address(0);
+ struct tv_valid *tv;
- gtod_log_caller(caller);
+#ifdef CONFIG_TLS_THREAD
+ tv = &static_tv_valid;
+#else
+ tv = pthread_getspecific(tv_tls_key);
#endif
- if (fio_tv) {
- memcpy(tp, fio_tv, sizeof(*tp));
- return;
- }
switch (fio_clock_source) {
+#ifdef CONFIG_GETTIMEOFDAY
case CS_GTOD:
gettimeofday(tp, NULL);
break;
+#endif
+#ifdef CONFIG_CLOCK_GETTIME
case CS_CGETTIME: {
struct timespec ts;
-#ifdef FIO_HAVE_CLOCK_MONOTONIC
- if (clock_gettime(CLOCK_MONOTONIC, &ts) < 0) {
-#else
- if (clock_gettime(CLOCK_REALTIME, &ts) < 0) {
-#endif
+ if (fill_clock_gettime(&ts) < 0) {
log_err("fio: clock_gettime fails\n");
assert(0);
}
 tp->tv_sec = ts.tv_sec;
 tp->tv_usec = ts.tv_nsec / 1000;
break;
}
+#endif
#ifdef ARCH_HAVE_CPU_CLOCK
case CS_CPUCLOCK: {
- unsigned long long usecs, t;
+ uint64_t usecs, t;
t = get_cpu_clock();
- if (t < last_cycles) {
+ if (tv && t < tv->last_cycles) {
dprint(FD_TIME, "CPU clock going back in time\n");
- t = last_cycles;
- }
+ t = tv->last_cycles;
+ } else if (tv)
+ tv->last_cycles = t;
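+ /*
+ * Cycles are converted to usecs with a fixed-point multiply rather
+ * than a division: inv_cycles_per_usec is 2^24 / cycles_per_usec,
+ * so (t * inv) / 2^24 == t / cycles_per_usec. Worked example with an
+ * illustrative 3 GHz clock: cycles_per_usec = 3000 gives
+ * inv = 16777216 / 3000 = 5592, and t = 3,000,000 cycles yields
+ * 3000000 * 5592 / 16777216 ~= 999.93, i.e. the expected ~1000 usecs
+ * with roughly 0.01% truncation error.
+ */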
- usecs = t / cycles_per_usec;
+ usecs = (t * inv_cycles_per_usec) / 16777216UL;
tp->tv_sec = usecs / 1000000;
tp->tv_usec = usecs % 1000000;
- last_cycles = t;
break;
}
#endif
break;
}
+ return tv;
+}
+
+#ifdef FIO_DEBUG_TIME
+void fio_gettime(struct timeval *tp, void *caller)
+#else
+void fio_gettime(struct timeval *tp, void fio_unused *caller)
+#endif
+{
+ struct tv_valid *tv;
+
+#ifdef FIO_DEBUG_TIME
+ if (!caller)
+ caller = __builtin_return_address(0);
+
+ gtod_log_caller(caller);
+#endif
+ if (fio_tv) {
+ memcpy(tp, fio_tv, sizeof(*tp));
+ return;
+ }
+
+ tv = __fio_gettime(tp);
+
/*
* If Linux is using the tsc clock on non-synced processors,
* sometimes time can appear to drift backwards. Fix that up.
*/
- if (last_tv_valid) {
- if (tp->tv_sec < last_tv.tv_sec)
- tp->tv_sec = last_tv.tv_sec;
- else if (last_tv.tv_sec == tp->tv_sec &&
- tp->tv_usec < last_tv.tv_usec)
- tp->tv_usec = last_tv.tv_usec;
+ if (tv) {
+ if (tv->last_tv_valid) {
+ if (tp->tv_sec < tv->last_tv.tv_sec)
+ tp->tv_sec = tv->last_tv.tv_sec;
+ else if (tv->last_tv.tv_sec == tp->tv_sec &&
+ tp->tv_usec < tv->last_tv.tv_usec)
+ tp->tv_usec = tv->last_tv.tv_usec;
+ }
+ tv->last_tv_valid = 1;
+ memcpy(&tv->last_tv, tp, sizeof(*tp));
}
- last_tv_valid = 1;
- memcpy(&last_tv, tp, sizeof(*tp));
}
#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned long get_cycles_per_usec(void)
{
struct timeval s, e;
- unsigned long long c_s, c_e;
+ uint64_t c_s, c_e;
+ enum fio_cs old_cs = fio_clock_source;
+
+#ifdef CONFIG_CLOCK_GETTIME
+ fio_clock_source = CS_CGETTIME;
+#else
+ fio_clock_source = CS_GTOD;
+#endif
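+ /*
+ * Calibrate the TSC against the OS clock: CS_CPUCLOCK cannot serve
+ * as the wall-time baseline here, since cycles_per_usec is exactly
+ * what this function is trying to measure. The caller's clock source
+ * is restored before returning.
+ */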
+ __fio_gettime(&s);
- gettimeofday(&s, NULL);
c_s = get_cpu_clock();
do {
- unsigned long long elapsed;
+ uint64_t elapsed;
+
+ __fio_gettime(&e);
- gettimeofday(&e, NULL);
elapsed = utime_since(&s, &e);
- if (elapsed >= 10) {
+ if (elapsed >= 1280) {
c_e = get_cpu_clock();
break;
}
} while (1);
- return c_e - c_s;
+ fio_clock_source = old_cs;
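+ /*
+ * c_e - c_s spans at least 1280 usecs of wall time, so dividing by
+ * 128 (with rounding) yields cycles per 10 usecs. The leftover
+ * factor of 10 is divided back out in calibrate_cpu_clock() via
+ * avg = (avg + 5) / 10, keeping one extra digit of precision through
+ * the averaging.
+ */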
+ return (c_e - c_s + 127) >> 7;
}
#define NR_TIME_ITERS 50
-static void calibrate_cpu_clock(void)
+static int calibrate_cpu_clock(void)
{
double delta, mean, S;
- unsigned long avg, cycles[NR_TIME_ITERS];
+ uint64_t avg, cycles[NR_TIME_ITERS];
int i, samples;
cycles[0] = get_cycles_per_usec();
}
}
+ /*
+ * The most common platform clock breakage is returning zero
+ * indefinitely. Check for that and return failure.
+ */
+ if (!cycles[0] && !cycles[NR_TIME_ITERS - 1])
+ return 1;
+
S = sqrt(S / (NR_TIME_ITERS - 1.0));
samples = avg = 0;
}
S /= (double) NR_TIME_ITERS;
- mean /= (double) NR_TIME_ITERS;
+ mean /= 10.0;
for (i = 0; i < NR_TIME_ITERS; i++)
- dprint(FD_TIME, "cycles[%d]=%lu\n", i, cycles[i] / 10);
+ dprint(FD_TIME, "cycles[%d]=%llu\n", i,
+ (unsigned long long) cycles[i] / 10);
- avg /= (samples * 10);
- dprint(FD_TIME, "avg: %lu\n", avg);
+ avg /= samples;
+ avg = (avg + 5) / 10;
+ dprint(FD_TIME, "avg: %llu\n", (unsigned long long) avg);
dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);
cycles_per_usec = avg;
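+ /*
+ * 2^24 is a compromise scale: large enough that the truncated
+ * reciprocal stays accurate (roughly 0.01% off for multi-GHz rates),
+ * small enough that t * inv_cycles_per_usec fits in 64 bits for any
+ * realistic run length. With inv ~= 5592 (illustrative 3 GHz part)
+ * the product only overflows past 2^64 / 5592 ~= 3.3e15 cycles,
+ * i.e. after about 12 days of uptime.
+ */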
+ inv_cycles_per_usec = 16777216UL / cycles_per_usec;
+ dprint(FD_TIME, "inv_cycles_per_usec=%lu\n", inv_cycles_per_usec);
+ return 0;
}
#else
-static void calibrate_cpu_clock(void)
+static int calibrate_cpu_clock(void)
+{
+ return 1;
+}
+#endif
+
+#ifndef CONFIG_TLS_THREAD
+void fio_local_clock_init(int is_thread)
+{
+ struct tv_valid *t;
+
+ t = calloc(1, sizeof(*t));
+ if (pthread_setspecific(tv_tls_key, t))
+ log_err("fio: can't set TLS key\n");
+}
+
+static void kill_tv_tls_key(void *data)
+{
+ free(data);
+}
+#else
+void fio_local_clock_init(int is_thread)
{
}
#endif
void fio_clock_init(void)
{
- last_tv_valid = 0;
- calibrate_cpu_clock();
+ if (fio_clock_source == fio_clock_source_inited)
+ return;
+
+#ifndef CONFIG_TLS_THREAD
+ if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
+ log_err("fio: can't create TLS key\n");
+#endif
+
+ fio_clock_source_inited = fio_clock_source;
+
+ if (calibrate_cpu_clock())
+ tsc_reliable = 0;
/*
* If the arch sets tsc_reliable != 0, then it must be good enough
log_info("fio: clocksource=cpu may not be reliable\n");
}
-unsigned long long utime_since(struct timeval *s, struct timeval *e)
+uint64_t utime_since(struct timeval *s, struct timeval *e)
{
long sec, usec;
- unsigned long long ret;
+ uint64_t ret;
sec = e->tv_sec - s->tv_sec;
usec = e->tv_usec - s->tv_usec;
return ret;
}
-unsigned long long utime_since_now(struct timeval *s)
+uint64_t utime_since_now(struct timeval *s)
{
struct timeval t;
return utime_since(s, &t);
}
-unsigned long mtime_since(struct timeval *s, struct timeval *e)
+uint64_t mtime_since(struct timeval *s, struct timeval *e)
{
long sec, usec, ret;
return ret;
}
-unsigned long mtime_since_now(struct timeval *s)
+uint64_t mtime_since_now(struct timeval *s)
{
struct timeval t;
void *p = __builtin_return_address(0);
return mtime_since(s, &t);
}
-unsigned long time_since_now(struct timeval *s)
+uint64_t time_since_now(struct timeval *s)
{
return mtime_since_now(s) / 1000;
}
+
+#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
+ defined(CONFIG_SFAA)
+
+#define CLOCK_ENTRIES 100000
+
+struct clock_entry {
+ uint32_t seq;
+ uint32_t cpu;
+ uint64_t tsc;
+};
+
+struct clock_thread {
+ pthread_t thread;
+ int cpu;
+ pthread_mutex_t lock;
+ pthread_mutex_t started;
+ uint32_t *seq;
+ struct clock_entry *entries;
+};
+
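+/*
+ * Thin wrapper around gcc's __sync_fetch_and_add(), which is what the
+ * CONFIG_SFAA gate on this block checks for. The builtin returns the
+ * old value, so adding 1 yields the post-increment value: every caller
+ * draws a unique, globally ordered ticket from the shared counter.
+ */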
+static inline uint32_t atomic32_inc_return(uint32_t *seq)
+{
+ return 1 + __sync_fetch_and_add(seq, 1);
+}
+
+static void *clock_thread_fn(void *data)
+{
+ struct clock_thread *t = data;
+ struct clock_entry *c;
+ os_cpu_mask_t cpu_mask;
+ uint32_t last_seq;
+ int i;
+
+ memset(&cpu_mask, 0, sizeof(cpu_mask));
+ fio_cpu_set(&cpu_mask, t->cpu);
+
+ if (fio_setaffinity(gettid(), cpu_mask) == -1) {
+ log_err("clock setaffinity failed\n");
+ return (void *) 1;
+ }
+
+ pthread_mutex_lock(&t->lock);
+ pthread_mutex_unlock(&t->started);
+
+ last_seq = 0;
+ c = &t->entries[0];
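+ /*
+ * Sampling protocol: draw a ticket from the shared sequence counter,
+ * then read the TSC; if another CPU has drawn a newer ticket by the
+ * time we re-check the counter, our (seq, tsc) pair may interleave
+ * with its sample, so retry. A clean pass means ticket order and
+ * TSC read order agree, which is what allows the validation below to
+ * sort by seq and require the timestamps to be non-decreasing.
+ */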
+ for (i = 0; i < CLOCK_ENTRIES; i++, c++) {
+ uint32_t seq;
+ uint64_t tsc;
+
+ c->cpu = t->cpu;
+ do {
+ seq = atomic32_inc_return(t->seq);
+ if (seq < last_seq)
+ break;
+ tsc = get_cpu_clock();
+ } while (seq != *t->seq);
+
+ c->seq = seq;
+ c->tsc = tsc;
+ /* advance the wrap guard so the seq < last_seq check above can fire */
+ last_seq = seq;
+ }
+
+ log_info("cs: cpu%3d: %llu clocks seen\n", t->cpu,
+ (unsigned long long) t->entries[i - 1].tsc - t->entries[0].tsc);
+
+ /*
+ * The most common platform clock breakage is returning zero
+ * indefinitely. Check for that and return failure.
+ */
+ if (!t->entries[i - 1].tsc && !t->entries[0].tsc)
+ return (void *) 1;
+
+ return NULL;
+}
+
+static int clock_cmp(const void *p1, const void *p2)
+{
+ const struct clock_entry *c1 = p1;
+ const struct clock_entry *c2 = p2;
+
+ if (c1->seq == c2->seq)
+ log_err("cs: bug in atomic sequence!\n");
+
+ return c1->seq - c2->seq;
+}
+
+int fio_monotonic_clocktest(void)
+{
+ struct clock_thread *threads;
+ unsigned int nr_cpus = cpus_online();
+ struct clock_entry *entries;
+ unsigned long tentries, failed;
+ struct clock_entry *prev, *this;
+ uint32_t seq = 0;
+ int i;
+
+ log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");
+
+ fio_debug |= 1U << FD_TIME;
+ calibrate_cpu_clock();
+ fio_debug &= ~(1U << FD_TIME);
+
+ threads = malloc(nr_cpus * sizeof(struct clock_thread));
+ tentries = CLOCK_ENTRIES * nr_cpus;
+ entries = malloc(tentries * sizeof(struct clock_entry));
+
+ log_info("cs: Testing %u CPUs\n", nr_cpus);
+
+ for (i = 0; i < nr_cpus; i++) {
+ struct clock_thread *t = &threads[i];
+
+ t->cpu = i;
+ t->seq = &seq;
+ t->entries = &entries[i * CLOCK_ENTRIES];
+ pthread_mutex_init(&t->lock, NULL);
+ pthread_mutex_init(&t->started, NULL);
+ pthread_mutex_lock(&t->lock);
+ pthread_create(&t->thread, NULL, clock_thread_fn, t);
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ struct clock_thread *t = &threads[i];
+
+ pthread_mutex_lock(&t->started);
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ struct clock_thread *t = &threads[i];
+
+ pthread_mutex_unlock(&t->lock);
+ }
+
+ for (failed = i = 0; i < nr_cpus; i++) {
+ struct clock_thread *t = &threads[i];
+ void *ret;
+
+ pthread_join(t->thread, &ret);
+ if (ret)
+ failed++;
+ }
+ free(threads);
+
+ if (failed) {
+ log_err("Clocksource test: %lu threads failed\n", failed);
+ goto err;
+ }
+
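+ /*
+ * Merge all per-CPU samples into global ticket order. On a sane,
+ * synchronized clocksource the tsc values must now be non-decreasing;
+ * any inversion is a cross-CPU clock mismatch and is logged below.
+ */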
+ qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);
+
+ for (failed = i = 0; i < tentries; i++) {
+ this = &entries[i];
+
+ if (!i) {
+ prev = this;
+ continue;
+ }
+
+ if (prev->tsc > this->tsc) {
+ uint64_t diff = prev->tsc - this->tsc;
+
+ log_info("cs: CPU clock mismatch (diff=%llu):\n",
+ (unsigned long long) diff);
+ log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", prev->cpu, (unsigned long long) prev->tsc, prev->seq);
+ log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", this->cpu, (unsigned long long) this->tsc, this->seq);
+ failed++;
+ }
+
+ prev = this;
+ }
+
+ if (failed)
+ log_info("cs: Failed: %lu\n", failed);
+ else
+ log_info("cs: Pass!\n");
+
+err:
+ free(entries);
+ return !!failed;
+}
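+
+/*
+ * Exposed via fio's --cpuclock-test command line option, which runs
+ * this self-test and exits with its result.
+ */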
+
+#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && defined(CONFIG_SFAA) */
+
+int fio_monotonic_clocktest(void)
+{
+ log_info("cs: current platform does not support CPU clocks\n");
+ return 0;
+}
+
+#endif