#ifdef ARCH_HAVE_CPU_CLOCK
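/*
 * Calibration results: cycles_per_usec is the measured CPU clock rate,
 * and inv_cycles_per_usec caches 2^24 / cycles_per_usec, so the fast
 * path can turn cycles into usecs with a multiply and a shift instead
 * of a 64-bit division.
 */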
static unsigned long cycles_per_usec;
static unsigned long inv_cycles_per_usec;
#endif
int tsc_reliable = 0;
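
/*
 * Last-seen time, tracked per thread so that time returned to a caller
 * never appears to move backwards. Lives in compiler TLS when
 * available, otherwise it hangs off a pthread key.
 */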
struct tv_valid {
	struct timeval last_tv;
	uint64_t last_cycles;
	int last_tv_valid;
};

#ifdef CONFIG_TLS_THREAD
static __thread struct tv_valid static_tv_valid;
#else
static pthread_key_t tv_tls_key;
#endif

enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
enum fio_cs fio_clock_source_inited = CS_INVAL;
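
/*
 * With FIO_DEBUG_TIME, every fio_gettime() call site is counted in a
 * small hash keyed on the caller's return address, and the totals are
 * dumped at exit. A debug aid for finding time-query hot spots.
 */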
#ifdef FIO_DEBUG_TIME

#define HASH_BITS	8
#define HASH_SIZE	(1 << HASH_BITS)

static struct flist_head hash[HASH_SIZE];
static int gtod_inited;

struct gtod_log {
	struct flist_head list;
	void *caller;
	unsigned long calls;
};

static struct gtod_log *find_hash(void *caller)
{
	unsigned long h = hash_ptr(caller, HASH_BITS);
	struct flist_head *entry;

	flist_for_each(entry, &hash[h]) {
		struct gtod_log *log = flist_entry(entry, struct gtod_log,
								list);

		if (log->caller == caller)
			return log;
	}

	return NULL;
}

static struct gtod_log *find_log(void *caller)
{
	struct gtod_log *log = find_hash(caller);

	if (!log) {
		unsigned long h;

		log = malloc(sizeof(*log));
		INIT_FLIST_HEAD(&log->list);
		log->caller = caller;
		log->calls = 0;

		h = hash_ptr(caller, HASH_BITS);
		flist_add_tail(&log->list, &hash[h]);
	}

	return log;
}

static void gtod_log_caller(void *caller)
{
	if (gtod_inited) {
		struct gtod_log *log = find_log(caller);

		log->calls++;
	}
}

static void fio_exit fio_dump_gtod(void)
{
	unsigned long total_calls = 0;
	int i;

	for (i = 0; i < HASH_SIZE; i++) {
		struct flist_head *entry;
		struct gtod_log *log;

		flist_for_each(entry, &hash[i]) {
			log = flist_entry(entry, struct gtod_log, list);

			printf("function %p, calls %lu\n", log->caller,
							log->calls);
			total_calls += log->calls;
		}
	}

	printf("Total %lu gettimeofday\n", total_calls);
}

static void fio_init gtod_init(void)
{
	int i;

	for (i = 0; i < HASH_SIZE; i++)
		INIT_FLIST_HEAD(&hash[i]);

	gtod_inited = 1;
}

#endif /* FIO_DEBUG_TIME */

#ifdef CONFIG_CLOCK_GETTIME
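/*
 * Prefer CLOCK_MONOTONIC when the platform provides it: unlike
 * CLOCK_REALTIME, it is immune to settimeofday() and NTP step changes.
 */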
static int fill_clock_gettime(struct timespec *ts)
{
#ifdef CONFIG_CLOCK_MONOTONIC
	return clock_gettime(CLOCK_MONOTONIC, ts);
#else
	return clock_gettime(CLOCK_REALTIME, ts);
#endif
}
#endif

static void *__fio_gettime(struct timeval *tp)
{
	struct tv_valid *tv;

#ifdef CONFIG_TLS_THREAD
	tv = &static_tv_valid;
#else
	tv = pthread_getspecific(tv_tls_key);
#endif

	switch (fio_clock_source) {
#ifdef CONFIG_GETTIMEOFDAY
	case CS_GTOD:
		gettimeofday(tp, NULL);
		break;
#endif
#ifdef CONFIG_CLOCK_GETTIME
	case CS_CGETTIME: {
		struct timespec ts;

		if (fill_clock_gettime(&ts) < 0) {
			log_err("fio: clock_gettime fails\n");
			assert(0);
		}

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		break;
		}
#endif
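	/*
	 * CS_CPUCLOCK reads the raw cycle counter and scales it to usecs
	 * via the 2^24 fixed point reciprocal set up at calibration time.
	 * Guard against the counter appearing to move backwards, as seen
	 * with unsynced TSCs across CPUs.
	 */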
#ifdef ARCH_HAVE_CPU_CLOCK
	case CS_CPUCLOCK: {
		uint64_t usecs, t;

		t = get_cpu_clock();
		if (tv && t < tv->last_cycles) {
			dprint(FD_TIME, "CPU clock going back in time\n");
			t = tv->last_cycles;
		} else if (tv)
			tv->last_cycles = t;

		usecs = (t * inv_cycles_per_usec) / 16777216UL;
		tp->tv_sec = usecs / 1000000;
		tp->tv_usec = usecs % 1000000;
		break;
		}
#endif
	default:
		log_err("fio: invalid clock source %d\n", fio_clock_source);
		break;
	}

	return tv;
}

#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timeval *tp, void *caller)
#else
void fio_gettime(struct timeval *tp, void fio_unused *caller)
#endif
{
	struct tv_valid *tv;

#ifdef FIO_DEBUG_TIME
	if (!caller)
		caller = __builtin_return_address(0);

	gtod_log_caller(caller);
#endif
	if (fio_tv) {
		memcpy(tp, fio_tv, sizeof(*tp));
		return;
	}

	tv = __fio_gettime(tp);

	/*
	 * If Linux is using the tsc clock on non-synced processors,
	 * sometimes time can appear to drift backwards. Fix that up.
	 */
	if (tv) {
		if (tv->last_tv_valid) {
			if (tp->tv_sec < tv->last_tv.tv_sec)
				tp->tv_sec = tv->last_tv.tv_sec;
			else if (tv->last_tv.tv_sec == tp->tv_sec &&
				 tp->tv_usec < tv->last_tv.tv_usec)
				tp->tv_usec = tv->last_tv.tv_usec;
		}
		tv->last_tv_valid = 1;
		memcpy(&tv->last_tv, tp, sizeof(*tp));
	}
}

#ifdef ARCH_HAVE_CPU_CLOCK
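/*
 * Count cycles over a window of at least 1280 usecs, timed with a
 * reference clock source. Dividing the cycle delta by 128 (rounded)
 * then gives roughly cycles per 10 usecs; the extra factor of ten is
 * kept for precision and rounded away in calibrate_cpu_clock().
 */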
static unsigned long get_cycles_per_usec(void)
{
	struct timeval s, e;
	uint64_t c_s, c_e;
	enum fio_cs old_cs = fio_clock_source;

#ifdef CONFIG_CLOCK_GETTIME
	fio_clock_source = CS_CGETTIME;
#else
	fio_clock_source = CS_GTOD;
#endif
	__fio_gettime(&s);

	c_s = get_cpu_clock();
	do {
		uint64_t elapsed;

		__fio_gettime(&e);

		elapsed = utime_since(&s, &e);
		if (elapsed >= 1280) {
			c_e = get_cpu_clock();
			break;
		}
	} while (1);

	fio_clock_source = old_cs;
	return (c_e - c_s + 127) >> 7;
}

#define NR_TIME_ITERS	50
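
/*
 * Calibrate the cycles-to-usec conversion: take NR_TIME_ITERS samples,
 * track mean and deviation online (Welford's method), discard samples
 * more than one standard deviation from the mean, and average the rest.
 */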
static int calibrate_cpu_clock(void)
{
	double delta, mean, S;
	uint64_t avg, cycles[NR_TIME_ITERS];
	int i, samples;

	cycles[0] = get_cycles_per_usec();
	S = delta = mean = 0.0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		cycles[i] = get_cycles_per_usec();
		delta = cycles[i] - mean;
		if (delta) {
			mean += delta / (i + 1.0);
			S += delta * (cycles[i] - mean);
		}
	}

	/*
	 * The most common platform clock breakage is returning zero
	 * indefinitely. Check for that and return failure.
	 */
	if (!cycles[0] && !cycles[NR_TIME_ITERS - 1])
		return 1;

	S = sqrt(S / (NR_TIME_ITERS - 1.0));

	samples = avg = 0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		double this = cycles[i];

		if ((fmax(this, mean) - fmin(this, mean)) > S)
			continue;

		samples++;
		avg += this;
	}

	S /= (double) NR_TIME_ITERS;
	mean /= 10.0;

	for (i = 0; i < NR_TIME_ITERS; i++)
		dprint(FD_TIME, "cycles[%d]=%llu\n", i,
				(unsigned long long) cycles[i] / 10);

	avg /= samples;
	avg = (avg + 5) / 10;
	dprint(FD_TIME, "avg: %llu\n", (unsigned long long) avg);
	dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);

	cycles_per_usec = avg;
	inv_cycles_per_usec = 16777216UL / cycles_per_usec;
	dprint(FD_TIME, "inv_cycles_per_usec=%lu\n", inv_cycles_per_usec);
	return 0;
}
#else
static int calibrate_cpu_clock(void)
{
	return 0;
}
#endif

#ifndef CONFIG_TLS_THREAD
void fio_local_clock_init(int is_thread)
{
	struct tv_valid *t;

	t = calloc(1, sizeof(*t));
	if (pthread_setspecific(tv_tls_key, t))
		log_err("fio: can't set TLS key\n");
}

static void kill_tv_tls_key(void *data)
{
	free(data);
}
#else
void fio_local_clock_init(int is_thread)
{
}
#endif

void fio_clock_init(void)
{
	if (fio_clock_source == fio_clock_source_inited)
		return;

#ifndef CONFIG_TLS_THREAD
	if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
		log_err("fio: can't create TLS key\n");
#endif

	fio_clock_source_inited = fio_clock_source;

	if (calibrate_cpu_clock())
		tsc_reliable = 0;

	/*
	 * If the arch sets tsc_reliable != 0, then it must be good enough
	 * to use as THE clock source. For x86 CPUs, this means the TSC
	 * runs at a constant rate and is synced across CPU cores.
	 */
	if (tsc_reliable) {
		if (!fio_clock_source_set)
			fio_clock_source = CS_CPUCLOCK;
	} else if (fio_clock_source == CS_CPUCLOCK)
		log_info("fio: clocksource=cpu may not be reliable\n");
}

uint64_t utime_since(struct timeval *s, struct timeval *e)
{
	long sec, usec;
	uint64_t ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	ret = sec * 1000000ULL + usec;
	return ret;
}

uint64_t utime_since_now(struct timeval *s)
{
	struct timeval t;

	fio_gettime(&t, NULL);
	return utime_since(s, &t);
}

uint64_t mtime_since(struct timeval *s, struct timeval *e)
{
	long sec, usec, ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	sec *= 1000UL;
	usec /= 1000UL;
	ret = sec + usec;
	return ret;
}
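
/*
 * Pass the return address as the caller cookie, so that FIO_DEBUG_TIME
 * accounting attributes this query to the real call site.
 */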
uint64_t mtime_since_now(struct timeval *s)
{
	struct timeval t;
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
	return mtime_since(s, &t);
}

uint64_t time_since_now(struct timeval *s)
{
	return mtime_since_now(s) / 1000;
}

#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
    defined(CONFIG_SFAA)

#define CLOCK_ENTRIES	100000

struct clock_entry {
	uint32_t seq;
	uint32_t cpu;
	uint64_t tsc;
};

struct clock_thread {
	pthread_t thread;
	int cpu;
	pthread_mutex_t lock;
	pthread_mutex_t started;
	uint32_t *seq;
	struct clock_entry *entries;
};

static inline uint32_t atomic32_inc_return(uint32_t *seq)
{
	return 1 + __sync_fetch_and_add(seq, 1);
}

static void *clock_thread_fn(void *data)
{
	struct clock_thread *t = data;
	struct clock_entry *c;
	os_cpu_mask_t cpu_mask;
	uint32_t last_seq;
	int i;

	memset(&cpu_mask, 0, sizeof(cpu_mask));
	fio_cpu_set(&cpu_mask, t->cpu);

	if (fio_setaffinity(gettid(), cpu_mask) == -1) {
		log_err("clock setaffinity failed\n");
		return (void *) 1;
	}

	pthread_mutex_lock(&t->lock);
	pthread_mutex_unlock(&t->started);
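
	/*
	 * Each sample grabs the next global sequence number and pairs it
	 * with a cycle counter read, retrying if another CPU bumped the
	 * sequence in between. Sorting by sequence later gives a total
	 * order in which the TSC must never move backwards.
	 */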
	last_seq = 0;
	c = &t->entries[0];
	for (i = 0; i < CLOCK_ENTRIES; i++, c++) {
		uint32_t seq;
		uint64_t tsc;

		c->cpu = t->cpu;
		do {
			seq = atomic32_inc_return(t->seq);
			if (seq < last_seq)
				break;
			tsc = get_cpu_clock();
		} while (seq != *t->seq);

		c->seq = seq;
		c->tsc = tsc;
	}

	log_info("cs: cpu%3d: %llu clocks seen\n", t->cpu,
		(unsigned long long) t->entries[i - 1].tsc - t->entries[0].tsc);

	/*
	 * The most common platform clock breakage is returning zero
	 * indefinitely. Check for that and return failure.
	 */
	if (!t->entries[i - 1].tsc && !t->entries[0].tsc)
		return (void *) 1;

	return NULL;
}

static int clock_cmp(const void *p1, const void *p2)
{
	const struct clock_entry *c1 = p1;
	const struct clock_entry *c2 = p2;

	if (c1->seq == c2->seq)
		log_err("cs: bug in atomic sequence!\n");

	return c1->seq - c2->seq;
}

int fio_monotonic_clocktest(void)
{
	struct clock_thread *threads;
	unsigned int nr_cpus = cpus_online();
	struct clock_entry *entries;
	unsigned long tentries, failed;
	struct clock_entry *prev, *this;
	uint32_t seq = 0;
	int i;

	log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");

	fio_debug |= 1U << FD_TIME;
	calibrate_cpu_clock();
	fio_debug &= ~(1U << FD_TIME);

	threads = malloc(nr_cpus * sizeof(struct clock_thread));
	tentries = CLOCK_ENTRIES * nr_cpus;
	entries = malloc(tentries * sizeof(struct clock_entry));

	log_info("cs: Testing %u CPUs\n", nr_cpus);
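
	/*
	 * Start one sampler thread per CPU. Each thread signals 'started'
	 * and then blocks on its private lock (held here); once every
	 * thread is pinned and ready, the locks are dropped so sampling
	 * begins roughly in unison.
	 */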
	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];

		t->cpu = i;
		t->seq = &seq;
		t->entries = &entries[i * CLOCK_ENTRIES];
		pthread_mutex_init(&t->lock, NULL);
		pthread_mutex_init(&t->started, NULL);
		pthread_mutex_lock(&t->lock);
		pthread_create(&t->thread, NULL, clock_thread_fn, t);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];

		pthread_mutex_lock(&t->started);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];

		pthread_mutex_unlock(&t->lock);
	}

	for (failed = i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];
		void *ret;

		pthread_join(t->thread, &ret);
		if (ret)
			failed++;
	}
	free(threads);

	if (failed) {
		log_err("Clocksource test: %lu threads failed\n", failed);
		goto err;
	}
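
	/*
	 * Merge all per-CPU samples into one sequence-ordered list, then
	 * verify the cycle counter never decreases between adjacent
	 * sequence numbers, regardless of which CPU logged them.
	 */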
	qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);

	for (failed = i = 0; i < tentries; i++) {
		this = &entries[i];

		if (!i) {
			prev = this;
			continue;
		}

		if (prev->tsc > this->tsc) {
			uint64_t diff = prev->tsc - this->tsc;

			log_info("cs: CPU clock mismatch (diff=%llu):\n",
					(unsigned long long) diff);
			log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", prev->cpu,
					(unsigned long long) prev->tsc, prev->seq);
			log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", this->cpu,
					(unsigned long long) this->tsc, this->seq);
			failed++;
		}

		prev = this;
	}

	if (failed)
		log_info("cs: Failed: %lu\n", failed);
	else
		log_info("cs: Pass!\n");

err:
	free(entries);
	return !!failed;
}

#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) */

int fio_monotonic_clocktest(void)
{
	log_info("cs: current platform does not support CPU clocks\n");
	return 0;
}

#endif