X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=gettime.c;h=f29edcb7bd64bb2085a03cae25c0ae25057c22aa;hp=df329f66d3d5735b3225411315303f87b039c796;hb=d5e3f5d8ef7fab70288aa57c8660bc3d2199655f;hpb=71339117638469372f703332d8b969382b12c49b

diff --git a/gettime.c b/gettime.c
index df329f66..f29edcb7 100644
--- a/gettime.c
+++ b/gettime.c
@@ -16,15 +16,19 @@
 #ifdef ARCH_HAVE_CPU_CLOCK
 static unsigned long cycles_per_usec;
 static unsigned long inv_cycles_per_usec;
-int tsc_reliable = 0;
 #endif
+int tsc_reliable = 0;
 
 struct tv_valid {
 	struct timeval last_tv;
 	int last_tv_valid;
 	unsigned long last_cycles;
 };
+#ifdef CONFIG_TLS_THREAD
+static struct tv_valid __thread static_tv_valid;
+#else
 static pthread_key_t tv_tls_key;
+#endif
 
 enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
 int fio_clock_source_set = 0;
@@ -121,40 +125,34 @@ static void fio_init gtod_init(void)
 
 #endif /* FIO_DEBUG_TIME */
 
+#ifdef CONFIG_CLOCK_GETTIME
 static int fill_clock_gettime(struct timespec *ts)
 {
-#ifdef FIO_HAVE_CLOCK_MONOTONIC
+#ifdef CONFIG_CLOCK_MONOTONIC
 	return clock_gettime(CLOCK_MONOTONIC, ts);
 #else
 	return clock_gettime(CLOCK_REALTIME, ts);
 #endif
 }
-
-#ifdef FIO_DEBUG_TIME
-void fio_gettime(struct timeval *tp, void *caller)
-#else
-void fio_gettime(struct timeval *tp, void fio_unused *caller)
 #endif
+
+static void *__fio_gettime(struct timeval *tp)
 {
 	struct tv_valid *tv;
 
-#ifdef FIO_DEBUG_TIME
-	if (!caller)
-		caller = __builtin_return_address(0);
-
-	gtod_log_caller(caller);
-#endif
-	if (fio_tv) {
-		memcpy(tp, fio_tv, sizeof(*tp));
-		return;
-	}
-
+#ifdef CONFIG_TLS_THREAD
+	tv = &static_tv_valid;
+#else
 	tv = pthread_getspecific(tv_tls_key);
+#endif
 
 	switch (fio_clock_source) {
+#ifdef CONFIG_GETTIMEOFDAY
 	case CS_GTOD:
 		gettimeofday(tp, NULL);
 		break;
+#endif
+#ifdef CONFIG_CLOCK_GETTIME
 	case CS_CGETTIME: {
 		struct timespec ts;
 
@@ -167,6 +165,7 @@ void fio_gettime(struct timeval *tp, void fio_unused *caller)
 		tp->tv_usec = ts.tv_nsec / 1000;
 		break;
 		}
+#endif
 #ifdef ARCH_HAVE_CPU_CLOCK
 	case CS_CPUCLOCK: {
 		unsigned long long usecs, t;
@@ -189,6 +188,30 @@ void fio_gettime(struct timeval *tp, void fio_unused *caller)
 		break;
 	}
 
+	return tv;
+}
+
+#ifdef FIO_DEBUG_TIME
+void fio_gettime(struct timeval *tp, void *caller)
+#else
+void fio_gettime(struct timeval *tp, void fio_unused *caller)
+#endif
+{
+	struct tv_valid *tv;
+
+#ifdef FIO_DEBUG_TIME
+	if (!caller)
+		caller = __builtin_return_address(0);
+
+	gtod_log_caller(caller);
+#endif
+	if (fio_tv) {
+		memcpy(tp, fio_tv, sizeof(*tp));
+		return;
+	}
+
+	tv = __fio_gettime(tp);
+
 	/*
 	 * If Linux is using the tsc clock on non-synced processors,
 	 * sometimes time can appear to drift backwards. Fix that up.
@@ -209,21 +232,22 @@ void fio_gettime(struct timeval *tp, void fio_unused *caller)
 #ifdef ARCH_HAVE_CPU_CLOCK
 static unsigned long get_cycles_per_usec(void)
 {
-	struct timespec ts;
 	struct timeval s, e;
 	unsigned long long c_s, c_e;
+	enum fio_cs old_cs = fio_clock_source;
 
-	fill_clock_gettime(&ts);
-	s.tv_sec = ts.tv_sec;
-	s.tv_usec = ts.tv_nsec / 1000;
+#ifdef CONFIG_CLOCK_GETTIME
+	fio_clock_source = CS_CGETTIME;
+#else
+	fio_clock_source = CS_GTOD;
+#endif
+	__fio_gettime(&s);
 
 	c_s = get_cpu_clock();
 	do {
 		unsigned long long elapsed;
 
-		fill_clock_gettime(&ts);
-		e.tv_sec = ts.tv_sec;
-		e.tv_usec = ts.tv_nsec / 1000;
+		__fio_gettime(&e);
 
 		elapsed = utime_since(&s, &e);
 		if (elapsed >= 1280) {
@@ -232,6 +256,7 @@ static unsigned long get_cycles_per_usec(void)
 		}
 	} while (1);
 
+	fio_clock_source = old_cs;
 	return (c_e - c_s + 127) >> 7;
 }
 
@@ -273,7 +298,7 @@ static void calibrate_cpu_clock(void)
 		dprint(FD_TIME, "cycles[%d]=%lu\n", i, cycles[i] / 10);
 
 	avg /= samples;
-	avg = (avg + 9) / 10;
+	avg = (avg + 5) / 10;
 	dprint(FD_TIME, "avg: %lu\n", avg);
 	dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);
 
@@ -287,6 +312,7 @@ static void calibrate_cpu_clock(void)
 }
 #endif
 
+#ifndef CONFIG_TLS_THREAD
 void fio_local_clock_init(int is_thread)
 {
 	struct tv_valid *t;
@@ -300,14 +326,21 @@ static void kill_tv_tls_key(void *data)
 {
 	free(data);
 }
+#else
+void fio_local_clock_init(int is_thread)
+{
+}
+#endif
 
 void fio_clock_init(void)
 {
 	if (fio_clock_source == fio_clock_source_inited)
 		return;
 
+#ifndef CONFIG_TLS_THREAD
 	if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
 		log_err("fio: can't create TLS key\n");
+#endif
 
 	fio_clock_source_inited = fio_clock_source;
 	calibrate_cpu_clock();
@@ -324,10 +357,10 @@ void fio_clock_init(void)
 		log_info("fio: clocksource=cpu may not be reliable\n");
 }
 
-unsigned long long utime_since(struct timeval *s, struct timeval *e)
+uint64_t utime_since(struct timeval *s, struct timeval *e)
 {
 	long sec, usec;
-	unsigned long long ret;
+	uint64_t ret;
 
 	sec = e->tv_sec - s->tv_sec;
 	usec = e->tv_usec - s->tv_usec;
@@ -347,7 +380,7 @@ unsigned long long utime_since(struct timeval *s, struct timeval *e)
 	return ret;
 }
 
-unsigned long long utime_since_now(struct timeval *s)
+uint64_t utime_since_now(struct timeval *s)
 {
 	struct timeval t;
 
@@ -355,7 +388,7 @@ unsigned long long utime_since_now(struct timeval *s)
 	return utime_since(s, &t);
 }
 
-unsigned long mtime_since(struct timeval *s, struct timeval *e)
+uint64_t mtime_since(struct timeval *s, struct timeval *e)
 {
 	long sec, usec, ret;
 
@@ -376,7 +409,7 @@ unsigned long mtime_since(struct timeval *s, struct timeval *e)
 	return ret;
 }
 
-unsigned long mtime_since_now(struct timeval *s)
+uint64_t mtime_since_now(struct timeval *s)
 {
 	struct timeval t;
 	void *p = __builtin_return_address(0);
@@ -385,12 +418,13 @@ unsigned long mtime_since_now(struct timeval *s)
 	return mtime_since(s, &t);
 }
 
-unsigned long time_since_now(struct timeval *s)
+uint64_t time_since_now(struct timeval *s)
 {
 	return mtime_since_now(s) / 1000;
 }
 
-#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK)
+#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
+    defined(CONFIG_SFAA)
 
 #define CLOCK_ENTRIES	100000
 
@@ -470,6 +504,8 @@ int fio_monotonic_clocktest(void)
 	uint64_t seq = 0;
 	int i;
 
+	log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");
+
 	fio_debug |= 1U << FD_TIME;
 	calibrate_cpu_clock();
 	fio_debug &= ~(1U << FD_TIME);
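
The CONFIG_TLS_THREAD hunks above switch the per-thread tv_valid state between compiler-managed __thread storage and a pthread_key_t slot with per-thread allocation. The standalone sketch below illustrates that pattern only; it is not code from gettime.c, and the names demo_state, demo_get_state and demo_tls_init are hypothetical.

/*
 * Minimal sketch of the two TLS strategies, assuming CONFIG_TLS_THREAD is
 * defined when the compiler supports __thread (as fio's configure does).
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_state {
	unsigned long last_cycles;
};

#ifdef CONFIG_TLS_THREAD
/* Compiler-managed TLS: no key setup, no per-thread heap allocation. */
static __thread struct demo_state static_state;

static void demo_tls_init(void)
{
}

static struct demo_state *demo_get_state(void)
{
	return &static_state;
}
#else
/* POSIX TLS: one shared key, per-thread allocation, destructor on exit. */
static pthread_key_t demo_key;

static void demo_free_state(void *data)
{
	free(data);
}

static void demo_tls_init(void)
{
	if (pthread_key_create(&demo_key, demo_free_state))
		fprintf(stderr, "can't create TLS key\n");
}

static struct demo_state *demo_get_state(void)
{
	struct demo_state *s = pthread_getspecific(demo_key);

	if (!s) {
		s = calloc(1, sizeof(*s));
		pthread_setspecific(demo_key, s);
	}
	return s;
}
#endif

int main(void)
{
	demo_tls_init();
	demo_get_state()->last_cycles = 42;
	printf("last_cycles=%lu\n", demo_get_state()->last_cycles);
	return 0;
}

Building with -DCONFIG_TLS_THREAD exercises the __thread path; the pthread-key path remains as the fallback for toolchains without __thread support, at the cost of a key lookup plus a heap allocation per thread, which is what the patch avoids on the fast path.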