#if defined(ARCH_HAVE_CPU_CLOCK) && !defined(ARCH_CPU_CLOCK_CYCLES_PER_USEC)
static unsigned long cycles_per_usec;
static unsigned long inv_cycles_per_usec;
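+/*
+ * New state for the CPU clock path: max_cycles_for_mult caps how long the
+ * fixed-point multiply in __fio_gettime() stays overflow-free,
+ * cycles_start anchors readings to the clock value sampled at
+ * calibration, and cycles_wrap flags that the raw clock has wrapped
+ * below cycles_start once.
+ */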
+static uint64_t max_cycles_for_mult;
+static unsigned long long cycles_start, cycles_wrap;
#endif
int tsc_reliable = 0;
struct tv_valid {
- struct timeval last_tv;
uint64_t last_cycles;
int last_tv_valid;
+ int warned;
};
+#ifdef ARCH_HAVE_CPU_CLOCK
#ifdef CONFIG_TLS_THREAD
static __thread struct tv_valid static_tv_valid;
#else
static pthread_key_t tv_tls_key;
#endif
+#endif
enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
return NULL;
}
-static struct gtod_log *find_log(void *caller)
+static void inc_caller(void *caller)
{
struct gtod_log *log = find_hash(caller);
flist_add_tail(&log->list, &hash[h]);
}
- return log;
+ log->calls++;
}
static void gtod_log_caller(void *caller)
{
- if (gtod_inited) {
- struct gtod_log *log = find_log(caller);
-
- log->calls++;
- }
+ if (gtod_inited)
+ inc_caller(caller);
}
static void fio_exit fio_dump_gtod(void)
}
#endif
-static void *__fio_gettime(struct timeval *tp)
+static void __fio_gettime(struct timeval *tp)
{
- struct tv_valid *tv;
-
-#ifdef CONFIG_TLS_THREAD
- tv = &static_tv_valid;
-#else
- tv = pthread_getspecific(tv_tls_key);
-#endif
-
switch (fio_clock_source) {
#ifdef CONFIG_GETTIMEOFDAY
case CS_GTOD:
#ifdef ARCH_HAVE_CPU_CLOCK
case CS_CPUCLOCK: {
uint64_t usecs, t;
+ struct tv_valid *tv;
+
+#ifdef CONFIG_TLS_THREAD
+ tv = &static_tv_valid;
+#else
+ tv = pthread_getspecific(tv_tls_key);
+#endif
t = get_cpu_clock();
- if (tv && t < tv->last_cycles) {
- dprint(FD_TIME, "CPU clock going back in time\n");
- t = tv->last_cycles;
- } else if (tv)
- tv->last_cycles = t;
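+ /*
+ * A reading below the calibration-time sample means the clock wrapped
+ * once; climbing back past cycles_start after that would be a second
+ * wrap, which the 64-bit delta below cannot represent, so warn once
+ * per thread and carry on.
+ */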
+ if (t < cycles_start && !cycles_wrap)
+ cycles_wrap = 1;
+ else if (cycles_wrap && t >= cycles_start && !tv->warned) {
+ log_err("fio: double CPU clock wrap\n");
+ tv->warned = 1;
+ }
+ t -= cycles_start;
+ tv->last_cycles = t;
+ tv->last_tv_valid = 1;
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
usecs = t / ARCH_CPU_CLOCK_CYCLES_PER_USEC;
#else
- usecs = (t * inv_cycles_per_usec) / 16777216UL;
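+ /*
+ * t * inv_cycles_per_usec / 2^24 is a fixed-point stand-in for
+ * t / cycles_per_usec, but the multiply overflows 64 bits once t
+ * passes max_cycles_for_mult; fall back to the exact divide there.
+ */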
+ if (t < max_cycles_for_mult)
+ usecs = (t * inv_cycles_per_usec) / 16777216UL;
+ else
+ usecs = t / cycles_per_usec;
#endif
tp->tv_sec = usecs / 1000000;
tp->tv_usec = usecs % 1000000;
log_err("fio: invalid clock source %d\n", fio_clock_source);
break;
}
-
- return tv;
}
#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timeval *tp, void fio_unused *caller)
#endif
{
- struct tv_valid *tv;
-
#ifdef FIO_DEBUG_TIME
if (!caller)
caller = __builtin_return_address(0);
gtod_log_caller(caller);
#endif
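+ /*
+ * With gtod offloading enabled, a dedicated thread keeps the current
+ * time up to date; fio_gettime_offload() copies it into *tp and
+ * returns true, replacing the old direct read of the shared fio_tv
+ * pointer.
+ */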
- if (fio_unlikely(fio_tv)) {
- memcpy(tp, fio_tv, sizeof(*tp));
+ if (fio_unlikely(fio_gettime_offload(tp)))
return;
- }
- tv = __fio_gettime(tp);
-
- /*
- * If Linux is using the tsc clock on non-synced processors,
- * sometimes time can appear to drift backwards. Fix that up.
- */
- if (tv) {
- if (tv->last_tv_valid) {
- if (tp->tv_sec < tv->last_tv.tv_sec)
- tp->tv_sec = tv->last_tv.tv_sec;
- else if (tv->last_tv.tv_sec == tp->tv_sec &&
- tp->tv_usec < tv->last_tv.tv_usec)
- tp->tv_usec = tv->last_tv.tv_usec;
- }
- tv->last_tv_valid = 1;
- memcpy(&tv->last_tv, tp, sizeof(*tp));
- }
+ __fio_gettime(tp);
}
#if defined(ARCH_HAVE_CPU_CLOCK) && !defined(ARCH_CPU_CLOCK_CYCLES_PER_USEC)
cycles_per_usec = avg;
inv_cycles_per_usec = 16777216UL / cycles_per_usec;
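+ /*
+ * Largest t for which t * inv_cycles_per_usec still fits in 64 bits.
+ * Worked example, assuming a 3 GHz TSC: cycles_per_usec = 3000,
+ * inv_cycles_per_usec = 16777216 / 3000 = 5592, so the multiply path
+ * covers ~0ULL / 5592 ~= 3.3e15 cycles, about 12.7 days' worth.
+ */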
+ max_cycles_for_mult = ~0ULL / inv_cycles_per_usec;
dprint(FD_TIME, "inv_cycles_per_usec=%lu\n", inv_cycles_per_usec);
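+ /*
+ * Anchor later readings to the clock at calibration time: deltas start
+ * near zero, which keeps the multiply path valid for as long as
+ * possible and gives wrap detection a known baseline.
+ */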
+ cycles_start = get_cpu_clock();
+ dprint(FD_TIME, "cycles_start=%llu\n", cycles_start);
return 0;
}
#else
struct tv_valid *t;
t = calloc(1, sizeof(*t));
- if (pthread_setspecific(tv_tls_key, t))
+ if (pthread_setspecific(tv_tls_key, t)) {
log_err("fio: can't set TLS key\n");
+ assert(0);
+ }
}
static void kill_tv_tls_key(void *data)
log_info("fio: clocksource=cpu may not be reliable\n");
}
-uint64_t utime_since(struct timeval *s, struct timeval *e)
+uint64_t utime_since(const struct timeval *s, const struct timeval *e)
{
long sec, usec;
uint64_t ret;
return ret;
}
-uint64_t utime_since_now(struct timeval *s)
+uint64_t utime_since_now(const struct timeval *s)
{
struct timeval t;
return utime_since(s, &t);
}
-uint64_t mtime_since(struct timeval *s, struct timeval *e)
+uint64_t mtime_since(const struct timeval *s, const struct timeval *e)
{
long sec, usec, ret;
return ret;
}
-uint64_t mtime_since_now(struct timeval *s)
+uint64_t mtime_since_now(const struct timeval *s)
{
struct timeval t;
void *p = __builtin_return_address(0);
return mtime_since(s, &t);
}
-uint64_t time_since_now(struct timeval *s)
+uint64_t time_since_now(const struct timeval *s)
{
return mtime_since_now(s) / 1000;
}
int fio_monotonic_clocktest(void)
{
- struct clock_thread *threads;
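+ /* 'cthreads' avoids reusing the name of fio's global 'threads' array */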
+ struct clock_thread *cthreads;
unsigned int nr_cpus = cpus_online();
struct clock_entry *entries;
unsigned long tentries, failed = 0;
log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");
+#ifdef FIO_INC_DEBUG
fio_debug |= 1U << FD_TIME;
+#endif
calibrate_cpu_clock();
+#ifdef FIO_INC_DEBUG
fio_debug &= ~(1U << FD_TIME);
+#endif
- threads = malloc(nr_cpus * sizeof(struct clock_thread));
+ cthreads = malloc(nr_cpus * sizeof(struct clock_thread));
tentries = CLOCK_ENTRIES * nr_cpus;
entries = malloc(tentries * sizeof(struct clock_entry));
log_info("cs: Testing %u CPUs\n", nr_cpus);
for (i = 0; i < nr_cpus; i++) {
- struct clock_thread *t = &threads[i];
+ struct clock_thread *t = &cthreads[i];
t->cpu = i;
t->seq = &seq;
}
for (i = 0; i < nr_cpus; i++) {
- struct clock_thread *t = &threads[i];
+ struct clock_thread *t = &cthreads[i];
pthread_mutex_lock(&t->started);
}
for (i = 0; i < nr_cpus; i++) {
- struct clock_thread *t = &threads[i];
+ struct clock_thread *t = &cthreads[i];
pthread_mutex_unlock(&t->lock);
}
for (i = 0; i < nr_cpus; i++) {
- struct clock_thread *t = &threads[i];
+ struct clock_thread *t = &cthreads[i];
void *ret;
pthread_join(t->thread, &ret);
if (ret)
failed++;
}
- free(threads);
+ free(cthreads);
if (failed) {
log_err("Clocksource test: %lu threads failed\n", failed);