#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned long cycles_per_usec;
-static unsigned long last_cycles;
+static unsigned long inv_cycles_per_usec;
+#endif
int tsc_reliable = 0;
+
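+/*
+ * Per-thread clock state: the last timeval and CPU cycle count used by the
+ * backwards-drift guard now live here (via __thread or a pthread TLS key)
+ * instead of in shared globals, so concurrent jobs no longer race on them.
+ */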
+struct tv_valid {
+ struct timeval last_tv;
+ int last_tv_valid;
+ unsigned long last_cycles;
+};
+#ifdef CONFIG_TLS_THREAD
+static struct tv_valid __thread static_tv_valid;
+#else
+static pthread_key_t tv_tls_key;
#endif
-static struct timeval last_tv;
-static int last_tv_valid;
enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
#endif /* FIO_DEBUG_TIME */
-#ifdef FIO_DEBUG_TIME
-void fio_gettime(struct timeval *tp, void *caller)
+#ifdef CONFIG_CLOCK_GETTIME
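+/*
+ * Prefer CLOCK_MONOTONIC when the platform provides it, so stepwise
+ * wall-clock adjustments cannot make the sampled time jump around.
+ */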
+static int fill_clock_gettime(struct timespec *ts)
+{
+#ifdef CONFIG_CLOCK_MONOTONIC
+ return clock_gettime(CLOCK_MONOTONIC, ts);
#else
-void fio_gettime(struct timeval *tp, void fio_unused *caller)
+ return clock_gettime(CLOCK_REALTIME, ts);
#endif
+}
+#endif
+
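+/*
+ * Fill *tp from the configured clock source and return this thread's
+ * tv_valid state; on the pthread-key path this may be NULL if
+ * fio_local_clock_init() has not yet run for the calling thread.
+ */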
+static void *__fio_gettime(struct timeval *tp)
{
-#ifdef FIO_DEBUG_TIME
- if (!caller)
- caller = __builtin_return_address(0);
+ struct tv_valid *tv;
- gtod_log_caller(caller);
+#ifdef CONFIG_TLS_THREAD
+ tv = &static_tv_valid;
+#else
+ tv = pthread_getspecific(tv_tls_key);
#endif
- if (fio_tv) {
- memcpy(tp, fio_tv, sizeof(*tp));
- return;
- }
switch (fio_clock_source) {
+#ifdef CONFIG_GETTIMEOFDAY
case CS_GTOD:
gettimeofday(tp, NULL);
break;
+#endif
+#ifdef CONFIG_CLOCK_GETTIME
case CS_CGETTIME: {
struct timespec ts;
-#ifdef FIO_HAVE_CLOCK_MONOTONIC
- if (clock_gettime(CLOCK_MONOTONIC, &ts) < 0) {
-#else
- if (clock_gettime(CLOCK_REALTIME, &ts) < 0) {
-#endif
+ if (fill_clock_gettime(&ts) < 0) {
log_err("fio: clock_gettime fails\n");
assert(0);
}
tp->tv_sec = ts.tv_sec;
tp->tv_usec = ts.tv_nsec / 1000;
break;
}
+#endif
#ifdef ARCH_HAVE_CPU_CLOCK
case CS_CPUCLOCK: {
unsigned long long usecs, t;
t = get_cpu_clock();
- if (t < last_cycles) {
+ if (tv && t < tv->last_cycles) {
dprint(FD_TIME, "CPU clock going back in time\n");
- t = last_cycles;
- }
+ t = tv->last_cycles;
+ } else if (tv)
+ tv->last_cycles = t;
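+ /*
+ * Cycles -> usecs via a fixed-point reciprocal: inv_cycles_per_usec
+ * is 2^24 / cycles_per_usec, so the divide by 16777216 below reduces
+ * to a shift instead of a per-call 64-bit division.
+ */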
- usecs = t / cycles_per_usec;
+ usecs = (t * inv_cycles_per_usec) / 16777216UL;
tp->tv_sec = usecs / 1000000;
tp->tv_usec = usecs % 1000000;
- last_cycles = t;
break;
}
#endif
break;
}
+ return tv;
+}
+
+#ifdef FIO_DEBUG_TIME
+void fio_gettime(struct timeval *tp, void *caller)
+#else
+void fio_gettime(struct timeval *tp, void fio_unused *caller)
+#endif
+{
+ struct tv_valid *tv;
+
+#ifdef FIO_DEBUG_TIME
+ if (!caller)
+ caller = __builtin_return_address(0);
+
+ gtod_log_caller(caller);
+#endif
+ if (fio_tv) {
+ memcpy(tp, fio_tv, sizeof(*tp));
+ return;
+ }
+
+ tv = __fio_gettime(tp);
+
/*
* If Linux is using the tsc clock on non-synced processors,
* sometimes time can appear to drift backwards. Fix that up.
*/
- if (last_tv_valid) {
- if (tp->tv_sec < last_tv.tv_sec)
- tp->tv_sec = last_tv.tv_sec;
- else if (last_tv.tv_sec == tp->tv_sec &&
- tp->tv_usec < last_tv.tv_usec)
- tp->tv_usec = last_tv.tv_usec;
+ if (tv) {
+ if (tv->last_tv_valid) {
+ if (tp->tv_sec < tv->last_tv.tv_sec)
+ tp->tv_sec = tv->last_tv.tv_sec;
+ else if (tv->last_tv.tv_sec == tp->tv_sec &&
+ tp->tv_usec < tv->last_tv.tv_usec)
+ tp->tv_usec = tv->last_tv.tv_usec;
+ }
+ tv->last_tv_valid = 1;
+ memcpy(&tv->last_tv, tp, sizeof(*tp));
}
- last_tv_valid = 1;
- memcpy(&last_tv, tp, sizeof(*tp));
}
#ifdef ARCH_HAVE_CPU_CLOCK
{
struct timeval s, e;
unsigned long long c_s, c_e;
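+ /*
+ * Calibration times the CPU counter against the OS clock, so
+ * temporarily force __fio_gettime() onto clock_gettime()/gettimeofday()
+ * and restore the configured source once done.
+ */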
+ enum fio_cs old_cs = fio_clock_source;
+
+#ifdef CONFIG_CLOCK_GETTIME
+ fio_clock_source = CS_CGETTIME;
+#else
+ fio_clock_source = CS_GTOD;
+#endif
- gettimeofday(&s, NULL);
+ __fio_gettime(&s);
c_s = get_cpu_clock();
do {
unsigned long long elapsed;
- gettimeofday(&e, NULL);
+ __fio_gettime(&e);
+
elapsed = utime_since(&s, &e);
if (elapsed >= 1280) {
c_e = get_cpu_clock();
break;
}
} while (1);
+ fio_clock_source = old_cs;
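+ /*
+ * Sampled over roughly 1280 usec, so dividing the cycle delta by 128
+ * (rounded up) gives cycles per ~10 usec; the caller averages these
+ * and scales the result down by 10.
+ */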
return (c_e - c_s + 127) >> 7;
}
dprint(FD_TIME, "cycles[%d]=%lu\n", i, cycles[i] / 10);
avg /= samples;
- avg = (avg + 9) / 10;
+ avg = (avg + 5) / 10;
dprint(FD_TIME, "avg: %lu\n", avg);
dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);
cycles_per_usec = avg;
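+ /*
+ * Precompute the 2^24-scaled reciprocal used by __fio_gettime(), so
+ * the hot path multiplies instead of dividing by cycles_per_usec.
+ */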
+ inv_cycles_per_usec = 16777216UL / cycles_per_usec;
+ dprint(FD_TIME, "inv_cycles_per_usec=%lu\n", inv_cycles_per_usec);
}
#else
static void calibrate_cpu_clock(void)
{
}
#endif
+#ifndef CONFIG_TLS_THREAD
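+/*
+ * Without compiler TLS, each thread allocates its own tv_valid and stores
+ * it under the process-wide key; kill_tv_tls_key() frees it at thread exit.
+ */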
+void fio_local_clock_init(int is_thread)
+{
+ struct tv_valid *t;
+
+ t = calloc(1, sizeof(*t));
+ if (pthread_setspecific(tv_tls_key, t))
+ log_err("fio: can't set TLS key\n");
+}
+
+static void kill_tv_tls_key(void *data)
+{
+ free(data);
+}
+#else
+void fio_local_clock_init(int is_thread)
+{
+}
+#endif
+
void fio_clock_init(void)
{
if (fio_clock_source == fio_clock_source_inited)
return;
- last_tv_valid = 0;
+#ifndef CONFIG_TLS_THREAD
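+ /* One process-wide key; threads attach their state in fio_local_clock_init(). */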
+ if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
+ log_err("fio: can't create TLS key\n");
+#endif
+
fio_clock_source_inited = fio_clock_source;
calibrate_cpu_clock();
log_info("fio: clocksource=cpu may not be reliable\n");
}
-unsigned long long utime_since(struct timeval *s, struct timeval *e)
+uint64_t utime_since(struct timeval *s, struct timeval *e)
{
long sec, usec;
- unsigned long long ret;
+ uint64_t ret;
sec = e->tv_sec - s->tv_sec;
usec = e->tv_usec - s->tv_usec;
return ret;
}
-unsigned long long utime_since_now(struct timeval *s)
+uint64_t utime_since_now(struct timeval *s)
{
struct timeval t;

fio_gettime(&t, NULL);
return utime_since(s, &t);
}
-unsigned long mtime_since(struct timeval *s, struct timeval *e)
+uint64_t mtime_since(struct timeval *s, struct timeval *e)
{
long sec, usec, ret;
return ret;
}
-unsigned long mtime_since_now(struct timeval *s)
+uint64_t mtime_since_now(struct timeval *s)
{
struct timeval t;
void *p = __builtin_return_address(0);

fio_gettime(&t, p);
return mtime_since(s, &t);
}
-unsigned long time_since_now(struct timeval *s)
+uint64_t time_since_now(struct timeval *s)
{
return mtime_since_now(s) / 1000;
}
-#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK)
+#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
+ defined(CONFIG_SFAA)
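+/*
+ * The clock test guarded below presumably bumps a shared sequence counter
+ * with __sync_fetch_and_add(), hence the added CONFIG_SFAA requirement.
+ */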
#define CLOCK_ENTRIES 100000