#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned long cycles_per_usec;
static unsigned long last_cycles;
#endif

static struct timeval last_tv;
static int last_tv_valid;

enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
enum fio_cs fio_clock_source_inited = CS_INVAL;
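
/*
 * When built with FIO_DEBUG_TIME, every fio_gettime() call site is
 * tracked in a small hash keyed on the caller's return address, so
 * that per-caller call counts can be dumped when fio exits.
 */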
#ifdef FIO_DEBUG_TIME

#define HASH_BITS       8
#define HASH_SIZE       (1 << HASH_BITS)

static struct flist_head hash[HASH_SIZE];
static int gtod_inited;

struct gtod_log {
        struct flist_head list;
        void *caller;
        unsigned long calls;
};
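
/*
 * Look up the log entry for a given call site. Returns NULL if this
 * caller has not been seen before.
 */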
static struct gtod_log *find_hash(void *caller)
{
        unsigned long h = hash_ptr(caller, HASH_BITS);
        struct flist_head *entry;

        flist_for_each(entry, &hash[h]) {
                struct gtod_log *log;

                log = flist_entry(entry, struct gtod_log, list);
                if (log->caller == caller)
                        return log;
        }

        return NULL;
}
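
/*
 * Like find_hash(), but allocate and insert a fresh entry on a miss.
 */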
static struct gtod_log *find_log(void *caller)
{
        struct gtod_log *log = find_hash(caller);

        if (!log) {
                unsigned long h;

                log = malloc(sizeof(*log));
                INIT_FLIST_HEAD(&log->list);
                log->caller = caller;
                log->calls = 0;

                h = hash_ptr(caller, HASH_BITS);
                flist_add_tail(&log->list, &hash[h]);
        }

        return log;
}
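
/*
 * Bump the call count for this caller. Calls arriving before
 * gtod_init() has run are ignored, since the hash is not set up yet.
 */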
static void gtod_log_caller(void *caller)
{
        if (gtod_inited) {
                struct gtod_log *log = find_log(caller);

                log->calls++;
        }
}
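
/*
 * Exit handler (via fio_exit): walk the hash and print how many time
 * queries each call site issued, plus the grand total.
 */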
static void fio_exit fio_dump_gtod(void)
{
        unsigned long total_calls = 0;
        int i;

        for (i = 0; i < HASH_SIZE; i++) {
                struct flist_head *entry;
                struct gtod_log *log;

                flist_for_each(entry, &hash[i]) {
                        log = flist_entry(entry, struct gtod_log, list);

                        printf("function %p, calls %lu\n", log->caller,
                                                                log->calls);
                        total_calls += log->calls;
                }
        }

        printf("Total %lu gettimeofday\n", total_calls);
}
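
/*
 * Constructor (via fio_init): initialize the hash buckets before any
 * callers can be logged.
 */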
static void fio_init gtod_init(void)
{
        int i;

        for (i = 0; i < HASH_SIZE; i++)
                INIT_FLIST_HEAD(&hash[i]);

        gtod_inited = 1;
}

#endif /* FIO_DEBUG_TIME */
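
/*
 * Fill in *tp from the configured clock source. If a dedicated
 * time-keeping thread is publishing the time (fio_tv is set), just
 * copy its value. The result is clamped so that time never appears
 * to move backwards between consecutive calls.
 */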
#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timeval *tp, void *caller)
#else
void fio_gettime(struct timeval *tp, void fio_unused *caller)
#endif
{
#ifdef FIO_DEBUG_TIME
        if (!caller)
                caller = __builtin_return_address(0);

        gtod_log_caller(caller);
#endif
        if (fio_tv) {
                memcpy(tp, fio_tv, sizeof(*tp));
                return;
        }

        switch (fio_clock_source) {
        case CS_GTOD:
                gettimeofday(tp, NULL);
                break;
        case CS_CGETTIME: {
                struct timespec ts;

#ifdef FIO_HAVE_CLOCK_MONOTONIC
                if (clock_gettime(CLOCK_MONOTONIC, &ts) < 0) {
#else
                if (clock_gettime(CLOCK_REALTIME, &ts) < 0) {
#endif
                        log_err("fio: clock_gettime fails\n");
                        assert(0);
                }

                tp->tv_sec = ts.tv_sec;
                tp->tv_usec = ts.tv_nsec / 1000;
                break;
                }
#ifdef ARCH_HAVE_CPU_CLOCK
        case CS_CPUCLOCK: {
                unsigned long long usecs, t;

                t = get_cpu_clock();
                if (t < last_cycles) {
                        dprint(FD_TIME, "CPU clock going back in time\n");
                        t = last_cycles;
                }

                usecs = t / cycles_per_usec;
                tp->tv_sec = usecs / 1000000;
                tp->tv_usec = usecs % 1000000;
                last_cycles = t;
                break;
                }
#endif
        default:
                log_err("fio: invalid clock source %d\n", fio_clock_source);
                break;
        }

        /*
         * If Linux is using the tsc clock on non-synced processors,
         * sometimes time can appear to drift backwards. Fix that up.
         */
        if (last_tv_valid) {
                if (tp->tv_sec < last_tv.tv_sec)
                        tp->tv_sec = last_tv.tv_sec;
                else if (last_tv.tv_sec == tp->tv_sec &&
                         tp->tv_usec < last_tv.tv_usec)
                        tp->tv_usec = last_tv.tv_usec;
        }
        last_tv_valid = 1;
        memcpy(&last_tv, tp, sizeof(*tp));
}
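
/*
 * Sample the CPU clock against gettimeofday() over a window of at
 * least 1280 usec and divide the cycle delta by 128. With a ~1280
 * usec window that yields roughly 10x the cycles-per-usec rate;
 * calibrate_cpu_clock() divides the factor of 10 back out.
 */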
#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned long get_cycles_per_usec(void)
{
        struct timeval s, e;
        unsigned long long c_s, c_e;

        gettimeofday(&s, NULL);
        c_s = get_cpu_clock();
        do {
                unsigned long long elapsed;

                gettimeofday(&e, NULL);
                elapsed = utime_since(&s, &e);
                if (elapsed >= 1280) {
                        c_e = get_cpu_clock();
                        break;
                }
        } while (1);

        return (c_e - c_s + 127) >> 7;
}
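
/*
 * Calibrate cycles_per_usec: take NR_TIME_ITERS samples, computing a
 * running mean and standard deviation (Welford's method), then average
 * only the samples that fall within one standard deviation of the mean
 * to discard outliers.
 */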
#define NR_TIME_ITERS   50

static void calibrate_cpu_clock(void)
{
        double delta, mean, S;
        unsigned long avg, cycles[NR_TIME_ITERS];
        int i, samples;

        cycles[0] = get_cycles_per_usec();
        S = delta = mean = 0.0;
        for (i = 0; i < NR_TIME_ITERS; i++) {
                cycles[i] = get_cycles_per_usec();
                delta = cycles[i] - mean;
                if (delta) {
                        mean += delta / (i + 1.0);
                        S += delta * (cycles[i] - mean);
                }
        }

        S = sqrt(S / (NR_TIME_ITERS - 1.0));

        samples = avg = 0;
        for (i = 0; i < NR_TIME_ITERS; i++) {
                double this = cycles[i];

                if ((fmax(this, mean) - fmin(this, mean)) > S)
                        continue;
                samples++;
                avg += this;
        }

        S /= (double) NR_TIME_ITERS;
        mean /= 10.0;

        for (i = 0; i < NR_TIME_ITERS; i++)
                dprint(FD_TIME, "cycles[%d]=%lu\n", i, cycles[i] / 10);

        avg /= samples;
        avg = (avg + 9) / 10;
        dprint(FD_TIME, "avg: %lu\n", avg);
        dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);

        cycles_per_usec = avg;
}

#else
static void calibrate_cpu_clock(void)
{
}
#endif
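
/*
 * (Re)initialize the clock source. Calibration is only redone when the
 * source has changed since the last init.
 */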
void fio_clock_init(void)
{
        if (fio_clock_source == fio_clock_source_inited)
                return;

        fio_clock_source_inited = fio_clock_source;
        calibrate_cpu_clock();

        /*
         * If the arch sets tsc_reliable != 0, then it must be good enough
         * to use as THE clock source. For x86 CPUs, this means the TSC
         * runs at a constant rate and is synced across CPU cores.
         */
        if (tsc_reliable) {
                if (!fio_clock_source_set)
                        fio_clock_source = CS_CPUCLOCK;
        } else if (fio_clock_source == CS_CPUCLOCK)
                log_info("fio: clocksource=cpu may not be reliable\n");
}
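
/*
 * Microseconds elapsed between two timevals. Negative intervals, as
 * seen with time warps on some kernels, are clamped to 0.
 */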
unsigned long long utime_since(struct timeval *s, struct timeval *e)
{
        long sec, usec;
        unsigned long long ret;

        sec = e->tv_sec - s->tv_sec;
        usec = e->tv_usec - s->tv_usec;
        if (sec > 0 && usec < 0) {
                sec--;
                usec += 1000000;
        }

        /*
         * time warp bug on some kernels?
         */
        if (sec < 0 || (sec == 0 && usec < 0))
                return 0;

        ret = sec * 1000000ULL + usec;

        return ret;
}

unsigned long long utime_since_now(struct timeval *s)
{
        struct timeval t;

        fio_gettime(&t, NULL);
        return utime_since(s, &t);
}
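
/*
 * Milliseconds elapsed between two timevals, with the same borrow
 * handling and time-warp clamping as utime_since().
 */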
unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
        long sec, usec, ret;

        sec = e->tv_sec - s->tv_sec;
        usec = e->tv_usec - s->tv_usec;
        if (sec > 0 && usec < 0) {
                sec--;
                usec += 1000000;
        }

        if (sec < 0 || (sec == 0 && usec < 0))
                return 0;

        sec *= 1000UL;
        usec /= 1000UL;
        ret = sec + usec;

        return ret;
}

unsigned long mtime_since_now(struct timeval *s)
{
        struct timeval t;
        void *p = __builtin_return_address(0);

        fio_gettime(&t, p);
        return mtime_since(s, &t);
}

unsigned long time_since_now(struct timeval *s)
{
        return mtime_since_now(s) / 1000;
}
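
/*
 * Monotonic clock test: one thread per CPU repeatedly increments a
 * shared sequence counter and records it together with the local CPU
 * clock. After sorting every record by sequence number, the clock
 * values must be non-decreasing if the CPU clocks are synced.
 */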
#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK)

#define CLOCK_ENTRIES   100000

struct clock_entry {
        unsigned long seq;
        unsigned long tsc;
        unsigned long cpu;
};

struct clock_thread {
        pthread_t thread;
        int cpu;
        pthread_mutex_t lock;
        pthread_mutex_t started;
        uint64_t *seq;
        struct clock_entry *entries;
};
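
/*
 * Atomically increment *seq and return the post-increment value.
 */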
static inline uint64_t atomic64_inc_return(uint64_t *seq)
{
        return 1 + __sync_fetch_and_add(seq, 1);
}
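
/*
 * Per-CPU worker: pin to the assigned CPU, wait for the start signal,
 * then record CLOCK_ENTRIES (seq, tsc) pairs. The retry loop discards
 * a pair whenever another thread bumped the sequence between our
 * increment and our clock read, so each recorded pair is effectively
 * taken atomically.
 */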
static void *clock_thread_fn(void *data)
{
        struct clock_thread *t = data;
        struct clock_entry *c;
        os_cpu_mask_t cpu_mask;
        int i;

        memset(&cpu_mask, 0, sizeof(cpu_mask));
        fio_cpu_set(&cpu_mask, t->cpu);

        if (fio_setaffinity(gettid(), cpu_mask) == -1) {
                log_err("clock setaffinity failed\n");
                return (void *) 1;
        }

        pthread_mutex_lock(&t->lock);
        pthread_mutex_unlock(&t->started);

        c = &t->entries[0];
        for (i = 0; i < CLOCK_ENTRIES; i++, c++) {
                uint64_t seq, tsc;

                c->cpu = t->cpu;
                do {
                        seq = atomic64_inc_return(t->seq);
                        tsc = get_cpu_clock();
                } while (seq != *t->seq);

                c->seq = seq;
                c->tsc = tsc;
        }

        log_info("cs: cpu%3d: %lu clocks seen\n", t->cpu,
                        t->entries[CLOCK_ENTRIES - 1].tsc - t->entries[0].tsc);
        return NULL;
}
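
/*
 * qsort comparator: order entries by sequence number. A duplicate
 * sequence number would indicate a broken atomic increment.
 */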
static int clock_cmp(const void *p1, const void *p2)
{
        const struct clock_entry *c1 = p1;
        const struct clock_entry *c2 = p2;

        if (c1->seq == c2->seq)
                log_err("cs: bug in atomic sequence!\n");

        return c1->seq - c2->seq;
}
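
/*
 * Test driver: start one thread per CPU (each held on t->lock), wait
 * until all of them have pinned themselves and signalled t->started,
 * release them together, then join and verify the merged log.
 */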
int fio_monotonic_clocktest(void)
{
        struct clock_thread *threads;
        unsigned int nr_cpus = cpus_online();
        struct clock_entry *entries;
        struct clock_entry *prev, *this;
        unsigned long tentries, failed;
        uint64_t seq = 0;
        int i;

        threads = malloc(nr_cpus * sizeof(struct clock_thread));
        tentries = CLOCK_ENTRIES * nr_cpus;
        entries = malloc(tentries * sizeof(struct clock_entry));

        log_info("cs: Testing %u CPUs\n", nr_cpus);

        for (i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &threads[i];

                t->cpu = i;
                t->seq = &seq;
                t->entries = &entries[i * CLOCK_ENTRIES];
                pthread_mutex_init(&t->lock, NULL);
                pthread_mutex_init(&t->started, NULL);
                pthread_mutex_lock(&t->lock);
                pthread_create(&t->thread, NULL, clock_thread_fn, t);
        }

        for (i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &threads[i];

                pthread_mutex_lock(&t->started);
        }

        for (i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &threads[i];

                pthread_mutex_unlock(&t->lock);
        }

        for (failed = i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &threads[i];
                void *ret;

                pthread_join(t->thread, &ret);
                if (ret)
                        failed++;
        }
        free(threads);

        if (failed) {
                log_err("Clocksource test: %lu threads failed\n", failed);
                goto err;
        }

        qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);

        for (failed = i = 0, prev = NULL; i < tentries; i++) {
                this = &entries[i];

                if (!i) {
                        prev = this;
                        continue;
                }

                if (prev->tsc > this->tsc) {
                        uint64_t diff = prev->tsc - this->tsc;

                        log_info("cs: CPU clock mismatch (diff=%lu):\n", diff);
                        log_info("\t CPU%3lu: TSC=%lu, SEQ=%lu\n", prev->cpu, prev->tsc, prev->seq);
                        log_info("\t CPU%3lu: TSC=%lu, SEQ=%lu\n", this->cpu, this->tsc, this->seq);
                        failed++;
                }

                prev = this;
        }

        if (failed)
                log_info("cs: Failed: %lu\n", failed);
        else
                log_info("cs: Pass!\n");

err:
        free(entries);
        return !!failed;
}

#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) */

int fio_monotonic_clocktest(void)
{
        log_info("cs: current platform does not support CPU clocks\n");
        return 0;
}

#endif