#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned long cycles_per_usec;
-static unsigned long last_cycles;
+static unsigned long inv_cycles_per_usec;
int tsc_reliable = 0;
#endif
-static struct timeval last_tv;
-static int last_tv_valid;
+
+struct tv_valid {
+ struct timeval last_tv;
+ int last_tv_valid;
+ unsigned long last_cycles;
+};
+static pthread_key_t tv_tls_key;
enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
#endif /* FIO_DEBUG_TIME */
+static int fill_clock_gettime(struct timespec *ts)
+{
+#ifdef FIO_HAVE_CLOCK_MONOTONIC
+ return clock_gettime(CLOCK_MONOTONIC, ts);
+#else
+ return clock_gettime(CLOCK_REALTIME, ts);
+#endif
+}
+
#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timeval *tp, void *caller)
#else
void fio_gettime(struct timeval *tp, void fio_unused *caller)
#endif
{
+ struct tv_valid *tv;
+
#ifdef FIO_DEBUG_TIME
if (!caller)
caller = __builtin_return_address(0);
return;
}
+ tv = pthread_getspecific(tv_tls_key);
+
switch (fio_clock_source) {
case CS_GTOD:
gettimeofday(tp, NULL);
case CS_CGETTIME: {
struct timespec ts;
-#ifdef FIO_HAVE_CLOCK_MONOTONIC
- if (clock_gettime(CLOCK_MONOTONIC, &ts) < 0) {
-#else
- if (clock_gettime(CLOCK_REALTIME, &ts) < 0) {
-#endif
+ if (fill_clock_gettime(&ts) < 0) {
log_err("fio: clock_gettime fails\n");
assert(0);
}
unsigned long long usecs, t;
t = get_cpu_clock();
- if (t < last_cycles) {
+ if (tv && t < tv->last_cycles) {
dprint(FD_TIME, "CPU clock going back in time\n");
- t = last_cycles;
- }
+ t = tv->last_cycles;
+ } else if (tv)
+ tv->last_cycles = t;
- usecs = t / cycles_per_usec;
+ usecs = (t * inv_cycles_per_usec) / 16777216UL;
tp->tv_sec = usecs / 1000000;
tp->tv_usec = usecs % 1000000;
- last_cycles = t;
break;
}
#endif
* If Linux is using the tsc clock on non-synced processors,
* sometimes time can appear to drift backwards. Fix that up.
*/
- if (last_tv_valid) {
- if (tp->tv_sec < last_tv.tv_sec)
- tp->tv_sec = last_tv.tv_sec;
- else if (last_tv.tv_sec == tp->tv_sec &&
- tp->tv_usec < last_tv.tv_usec)
- tp->tv_usec = last_tv.tv_usec;
+ if (tv) {
+ if (tv->last_tv_valid) {
+ if (tp->tv_sec < tv->last_tv.tv_sec)
+ tp->tv_sec = tv->last_tv.tv_sec;
+ else if (tv->last_tv.tv_sec == tp->tv_sec &&
+ tp->tv_usec < tv->last_tv.tv_usec)
+ tp->tv_usec = tv->last_tv.tv_usec;
+ }
+ tv->last_tv_valid = 1;
+ memcpy(&tv->last_tv, tp, sizeof(*tp));
}
- last_tv_valid = 1;
- memcpy(&last_tv, tp, sizeof(*tp));
}
#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned long get_cycles_per_usec(void)
{
+ struct timespec ts;
struct timeval s, e;
unsigned long long c_s, c_e;
- gettimeofday(&s, NULL);
+ fill_clock_gettime(&ts);
+ s.tv_sec = ts.tv_sec;
+ s.tv_usec = ts.tv_nsec / 1000;
+
c_s = get_cpu_clock();
do {
unsigned long long elapsed;
- gettimeofday(&e, NULL);
+ fill_clock_gettime(&ts);
+ e.tv_sec = ts.tv_sec;
+ e.tv_usec = ts.tv_nsec / 1000;
+
elapsed = utime_since(&s, &e);
if (elapsed >= 1280) {
c_e = get_cpu_clock();
dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);
cycles_per_usec = avg;
+ inv_cycles_per_usec = 16777216UL / cycles_per_usec;
+ dprint(FD_TIME, "inv_cycles_per_usec=%lu\n", inv_cycles_per_usec);
}
#else
static void calibrate_cpu_clock(void)
}
#endif
+void fio_local_clock_init(int is_thread)
+{
+ struct tv_valid *t;
+
+	t = calloc(1, sizeof(*t));
+	if (pthread_setspecific(tv_tls_key, t)) {
+		log_err("fio: can't set TLS key\n");
+		free(t);
+	}
+}
+
+static void kill_tv_tls_key(void *data)
+{
+ free(data);
+}
+
void fio_clock_init(void)
{
if (fio_clock_source == fio_clock_source_inited)
return;
- last_tv_valid = 0;
+ if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
+ log_err("fio: can't create TLS key\n");
+
fio_clock_source_inited = fio_clock_source;
calibrate_cpu_clock();