#if defined(ARCH_HAVE_CPU_CLOCK) && !defined(ARCH_CPU_CLOCK_CYCLES_PER_USEC)
static unsigned long cycles_per_usec;
static unsigned long inv_cycles_per_usec;
+static uint64_t max_cycles_for_mult;			/* bound for the 2^24 fast path */
+static unsigned long long cycles_start, cycles_wrap;	/* wrap baseline and wrap flag */
#endif
int tsc_reliable = 0;
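
The guard above means this calibration state exists only when the platform cannot supply a fixed cycle rate at build time. As a purely hypothetical illustration (the value is invented, not from any real fio arch header), a platform with a fixed-rate counter would sidestep all of it:

/* Hypothetical arch header: a compile-time rate lets __fio_gettime()
 * divide directly, so no runtime calibration state is compiled in. */
#define ARCH_HAVE_CPU_CLOCK
#define ARCH_CPU_CLOCK_CYCLES_PER_USEC	1000	/* e.g. a fixed 1 GHz counter */
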
	return NULL;
}

-static struct gtod_log *find_log(void *caller)
+static void inc_caller(void *caller)
{
	struct gtod_log *log = find_hash(caller);

		flist_add_tail(&log->list, &hash[h]);
	}

-	return log;
+	log->calls++;
}
static void gtod_log_caller(void *caller)
{
-	if (gtod_inited) {
-		struct gtod_log *log = find_log(caller);
-
-		log->calls++;
-	}
+	if (gtod_inited)
+		inc_caller(caller);
}

static void fio_exit fio_dump_gtod(void)
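
For context on the refactor above: each fio_gettime() call site is keyed by a caller pointer, hashed into a bucket list, and counted, so fio_dump_gtod() can report at exit where the gettimeofday() traffic originates. A minimal sketch of a call site feeding inc_caller(), assuming the caller key is the return address (the surrounding debug plumbing is abbreviated):

/* Sketch: key the counter on the call site's return address. */
void fio_gettime(struct timeval *tp, void *caller)
{
	if (!caller)
		caller = __builtin_return_address(0);

	gtod_log_caller(caller);
	/* ... actual time retrieval follows ... */
}
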
	uint64_t usecs, t;

	t = get_cpu_clock();
-	if (t < tv->last_cycles && tv->last_tv_valid &&
-	    !tv->warned) {
-		log_err("fio: CPU clock going back in time\n");
-		tv->warned = 1;
+	if (t < cycles_start && !cycles_wrap)
+		cycles_wrap = 1;	/* first wrap below the calibration baseline */
+	else if (cycles_wrap && t >= cycles_start) {
+		if (!tv->warned) {
+			log_err("fio: double CPU clock wrap\n");
+			tv->warned = 1;
+		}
	}
+	t -= cycles_start;	/* unsigned math recovers elapsed across one wrap */
	tv->last_cycles = t;
	tv->last_tv_valid = 1;
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
	usecs = t / ARCH_CPU_CLOCK_CYCLES_PER_USEC;
#else
-	usecs = (t * inv_cycles_per_usec) / 16777216UL;
+	if (t < max_cycles_for_mult)
+		usecs = (t * inv_cycles_per_usec) / 16777216UL;
+	else
+		usecs = t / cycles_per_usec;	/* multiply would overflow 64 bits */
#endif
	tp->tv_sec = usecs / 1000000;
	tp->tv_usec = usecs % 1000000;
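
The new branch guards the fixed-point fast path: inv_cycles_per_usec is the reciprocal cycle rate scaled by 2^24 (16777216), turning the conversion into one multiply and one constant divide, valid only while t * inv_cycles_per_usec fits in 64 bits. A standalone sketch with illustrative numbers (the 3 GHz rate is an assumption, not a measurement):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned long cycles_per_usec = 3000;			/* assumed 3 GHz clock */
	unsigned long inv = 16777216UL / cycles_per_usec;	/* 2^24 scaled reciprocal */
	uint64_t max_cycles_for_mult = ~0ULL / inv;		/* keep t * inv in 64 bits */
	uint64_t t = 123456789ULL;

	/* the two paths agree to within the reciprocal's rounding error */
	printf("fast=%llu slow=%llu\n",
	       (unsigned long long)((t * inv) / 16777216UL),
	       (unsigned long long)(t / cycles_per_usec));
	/* at 3 GHz the fast path covers ~3.3e15 cycles, roughly 12 days */
	printf("max_cycles_for_mult=%llu\n",
	       (unsigned long long)max_cycles_for_mult);
	return 0;
}
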
	gtod_log_caller(caller);
#endif
-	if (fio_unlikely(fio_tv)) {
-		memcpy(tp, fio_tv, sizeof(*tp));
+	if (fio_unlikely(fio_gettime_offload(tp)))
		return;
-	}

	__fio_gettime(tp);
}
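
fio_gettime_offload() replaces the open-coded copy from fio_tv that the removed lines show. A minimal sketch of such a helper, assuming fio_tv still points at a timeval kept current by the gtod offload thread; a real version reading another thread's updates would also need appropriate synchronization:

#include <stdbool.h>
#include <string.h>
#include <sys/time.h>

extern struct timeval *fio_tv;	/* published by the gtod offload thread */

/* Sketch (assumption): report the offloaded time if available, otherwise
 * tell the caller to read the clock itself. */
static inline bool fio_gettime_offload(struct timeval *tp)
{
	if (!fio_tv)
		return false;

	memcpy(tp, fio_tv, sizeof(*tp));
	return true;
}
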
	cycles_per_usec = avg;
	inv_cycles_per_usec = 16777216UL / cycles_per_usec;
+	max_cycles_for_mult = ~0ULL / inv_cycles_per_usec;	/* fast-path bound */
	dprint(FD_TIME, "inv_cycles_per_usec=%lu\n", inv_cycles_per_usec);
+	cycles_start = get_cpu_clock();	/* zero point for wrap detection */
+	dprint(FD_TIME, "cycles_start=%llu\n", cycles_start);
	return 0;
}
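
Calibration now also records cycles_start as the zero point that the wrap check in __fio_gettime() compares against. To see why one wrap is recoverable but a second is not, here is a toy simulation with a deliberately narrow 16-bit counter (an assumption for readability; the unsigned arithmetic works the same at 32 or 64 bits):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t cycles_start = 60000;	/* baseline taken at calibration */
	int cycles_wrap = 0;

	for (uint32_t raw = 60000; raw <= 130000; raw += 10000) {
		uint16_t t = (uint16_t)raw;	/* the counter truncates, i.e. wraps */

		if (t < cycles_start && !cycles_wrap)
			cycles_wrap = 1;
		else if (cycles_wrap && t >= cycles_start)
			printf("double wrap: elapsed no longer representable\n");

		/* unsigned subtraction recovers elapsed time across one wrap only */
		printf("raw=%u t=%u elapsed=%u wrap=%d\n",
		       (unsigned)raw, (unsigned)t,
		       (unsigned)(uint16_t)(t - cycles_start), cycles_wrap);
	}
	return 0;
}
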
#else