#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned long cycles_per_usec;
-int tsc_reliable = 0;
+static unsigned long inv_cycles_per_usec;
#endif
+int tsc_reliable = 0;
struct tv_valid {
struct timeval last_tv;
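+ /* last CPU clock sample seen by this thread; used to spot the clock running backwards */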
+ uint64_t last_cycles;
int last_tv_valid;
- unsigned long last_cycles;
};
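+/* Per-thread clock state: use compiler TLS (__thread) when available, otherwise fall back to a pthread TLS key */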
+#ifdef CONFIG_TLS_THREAD
+static struct tv_valid __thread static_tv_valid;
+#else
static pthread_key_t tv_tls_key;
+#endif
enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
#endif /* FIO_DEBUG_TIME */
+#ifdef CONFIG_CLOCK_GETTIME
static int fill_clock_gettime(struct timespec *ts)
{
-#ifdef FIO_HAVE_CLOCK_MONOTONIC
+#ifdef CONFIG_CLOCK_MONOTONIC
return clock_gettime(CLOCK_MONOTONIC, ts);
#else
return clock_gettime(CLOCK_REALTIME, ts);
#endif
}
-
-#ifdef FIO_DEBUG_TIME
-void fio_gettime(struct timeval *tp, void *caller)
-#else
-void fio_gettime(struct timeval *tp, void fio_unused *caller)
#endif
+
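+/* Fill *tp from the configured clock source and return the calling thread's tv_valid state (may be NULL) */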
+static void *__fio_gettime(struct timeval *tp)
{
struct tv_valid *tv;
-#ifdef FIO_DEBUG_TIME
- if (!caller)
- caller = __builtin_return_address(0);
-
- gtod_log_caller(caller);
-#endif
- if (fio_tv) {
- memcpy(tp, fio_tv, sizeof(*tp));
- return;
- }
-
+#ifdef CONFIG_TLS_THREAD
+ tv = &static_tv_valid;
+#else
tv = pthread_getspecific(tv_tls_key);
+#endif
switch (fio_clock_source) {
+#ifdef CONFIG_GETTIMEOFDAY
case CS_GTOD:
gettimeofday(tp, NULL);
break;
+#endif
+#ifdef CONFIG_CLOCK_GETTIME
case CS_CGETTIME: {
struct timespec ts;
fill_clock_gettime(&ts);
tp->tv_sec = ts.tv_sec;
tp->tv_usec = ts.tv_nsec / 1000;
break;
}
+#endif
#ifdef ARCH_HAVE_CPU_CLOCK
case CS_CPUCLOCK: {
- unsigned long long usecs, t;
+ uint64_t usecs, t;
t = get_cpu_clock();
if (tv && t < tv->last_cycles) {
dprint(FD_TIME, "CPU clock going back in time\n");
t = tv->last_cycles;
} else if (tv)
tv->last_cycles = t;
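+ /* convert cycles to usecs with a multiply and a power-of-two divide instead of dividing by cycles_per_usec on every call */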
- usecs = t / cycles_per_usec;
+ usecs = (t * inv_cycles_per_usec) / 16777216UL;
tp->tv_sec = usecs / 1000000;
tp->tv_usec = usecs % 1000000;
break;
}
#endif
}
+ return tv;
+}
+
+#ifdef FIO_DEBUG_TIME
+void fio_gettime(struct timeval *tp, void *caller)
+#else
+void fio_gettime(struct timeval *tp, void fio_unused *caller)
+#endif
+{
+ struct tv_valid *tv;
+
+#ifdef FIO_DEBUG_TIME
+ if (!caller)
+ caller = __builtin_return_address(0);
+
+ gtod_log_caller(caller);
+#endif
+ if (fio_tv) {
+ memcpy(tp, fio_tv, sizeof(*tp));
+ return;
+ }
+
+ tv = __fio_gettime(tp);
+
/*
* If Linux is using the tsc clock on non-synced processors,
* sometimes time can appear to drift backwards. Fix that up.
#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned long get_cycles_per_usec(void)
{
- struct timespec ts;
struct timeval s, e;
- unsigned long long c_s, c_e;
+ uint64_t c_s, c_e;
+ enum fio_cs old_cs = fio_clock_source;
- fill_clock_gettime(&ts);
- s.tv_sec = ts.tv_sec;
- s.tv_usec = ts.tv_nsec / 1000;
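+ /* time the CPU clock against a wall-clock source; force clock_gettime()/gettimeofday() here, since the CPU clock is the thing being calibrated */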
+#ifdef CONFIG_CLOCK_GETTIME
+ fio_clock_source = CS_CGETTIME;
+#else
+ fio_clock_source = CS_GTOD;
+#endif
+ __fio_gettime(&s);
c_s = get_cpu_clock();
do {
- unsigned long long elapsed;
+ uint64_t elapsed;
- fill_clock_gettime(&ts);
- e.tv_sec = ts.tv_sec;
- e.tv_usec = ts.tv_nsec / 1000;
+ __fio_gettime(&e);
elapsed = utime_since(&s, &e);
if (elapsed >= 1280) {
c_e = get_cpu_clock();
break;
}
} while (1);
+ fio_clock_source = old_cs;
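+ /* elapsed is ~1280 usecs, so >> 7 yields cycles per ~10 usecs; the caller scales this back down by 10 */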
return (c_e - c_s + 127) >> 7;
}
static void calibrate_cpu_clock(void)
{
double delta, mean, S;
- unsigned long avg, cycles[NR_TIME_ITERS];
+ uint64_t avg, cycles[NR_TIME_ITERS];
int i, samples;
cycles[0] = get_cycles_per_usec();
dprint(FD_TIME, "cycles[%d]=%lu\n", i, cycles[i] / 10);
avg /= samples;
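+ /* round to nearest when scaling from cycles per 10 usecs down to cycles per usec */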
- avg = (avg + 9) / 10;
+ avg = (avg + 5) / 10;
dprint(FD_TIME, "avg: %lu\n", avg);
dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);
cycles_per_usec = avg;
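+ /* 16777216 is 2^24: inv_cycles_per_usec is a fixed-point reciprocal of cycles_per_usec, so __fio_gettime() can convert cycles to usecs without a runtime division */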
+ inv_cycles_per_usec = 16777216UL / cycles_per_usec;
+ dprint(FD_TIME, "inv_cycles_per_usec=%lu\n", inv_cycles_per_usec);
}
#else
static void calibrate_cpu_clock(void)
{
}
#endif
+#ifndef CONFIG_TLS_THREAD
void fio_local_clock_init(int is_thread)
{
struct tv_valid *t;
t = calloc(1, sizeof(*t));
if (pthread_setspecific(tv_tls_key, t))
log_err("fio: can't set TLS key\n");
}
static void kill_tv_tls_key(void *data)
{
free(data);
}
+#else
+void fio_local_clock_init(int is_thread)
+{
+}
+#endif
void fio_clock_init(void)
{
if (fio_clock_source == fio_clock_source_inited)
return;
+#ifndef CONFIG_TLS_THREAD
if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
log_err("fio: can't create TLS key\n");
+#endif
fio_clock_source_inited = fio_clock_source;
calibrate_cpu_clock();
log_info("fio: clocksource=cpu may not be reliable\n");
}
-unsigned long long utime_since(struct timeval *s, struct timeval *e)
+uint64_t utime_since(struct timeval *s, struct timeval *e)
{
long sec, usec;
- unsigned long long ret;
+ uint64_t ret;
sec = e->tv_sec - s->tv_sec;
usec = e->tv_usec - s->tv_usec;
if (sec > 0 && usec < 0) {
sec--;
usec += 1000000;
}
ret = sec * 1000000ULL + usec;
return ret;
}
-unsigned long long utime_since_now(struct timeval *s)
+uint64_t utime_since_now(struct timeval *s)
{
struct timeval t;
fio_gettime(&t, NULL);
return utime_since(s, &t);
}
-unsigned long mtime_since(struct timeval *s, struct timeval *e)
+uint64_t mtime_since(struct timeval *s, struct timeval *e)
{
long sec, usec, ret;
sec = e->tv_sec - s->tv_sec;
usec = e->tv_usec - s->tv_usec;
if (sec > 0 && usec < 0) {
sec--;
usec += 1000000;
}
ret = sec * 1000 + usec / 1000;
return ret;
}
-unsigned long mtime_since_now(struct timeval *s)
+uint64_t mtime_since_now(struct timeval *s)
{
struct timeval t;
void *p = __builtin_return_address(0);
fio_gettime(&t, p);
return mtime_since(s, &t);
}
-unsigned long time_since_now(struct timeval *s)
+uint64_t time_since_now(struct timeval *s)
{
return mtime_since_now(s) / 1000;
}
-#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK)
+#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
+ defined(CONFIG_SFAA)
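+/* the clock test below relies on an atomically incremented shared sequence counter, hence the added CONFIG_SFAA requirement */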
#define CLOCK_ENTRIES 100000
struct clock_entry {
- unsigned long seq;
- unsigned long tsc;
- unsigned long cpu;
+ uint64_t seq;
+ uint64_t tsc;
+ uint64_t cpu;
};
struct clock_thread {
unsigned int nr_cpus = cpus_online();
struct clock_entry *entries;
unsigned long tentries, failed;
+ struct clock_entry *prev, *this;
uint64_t seq = 0;
int i;
+ log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");
+
fio_debug |= 1U << FD_TIME;
calibrate_cpu_clock();
fio_debug &= ~(1U << FD_TIME);
qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);
for (failed = i = 0; i < tentries; i++) {
- struct clock_entry *prev, *this = &entries[i];
+ this = &entries[i];
if (!i) {
prev = this;