Use clock_gettime() for CPU clock calibration
[fio.git] / gettime.c
/*
 * Clock functions
 */

#include <unistd.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>

#include "fio.h"
#include "smalloc.h"

#include "hash.h"
#include "os/os.h"

#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned long cycles_per_usec;
int tsc_reliable = 0;
#endif
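/*
 * Per-thread (TLS) clock state: the last timeval handed out and the last
 * raw CPU clock sample, used to keep reported time monotonic per thread.
 */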
struct tv_valid {
	struct timeval last_tv;
	int last_tv_valid;
	unsigned long last_cycles;
};
static pthread_key_t tv_tls_key;

enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
enum fio_cs fio_clock_source_inited = CS_INVAL;

#ifdef FIO_DEBUG_TIME

#define HASH_BITS	8
#define HASH_SIZE	(1 << HASH_BITS)

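/*
 * When built with FIO_DEBUG_TIME, every fio_gettime() call site is counted
 * in a small hash table keyed by caller address, and the per-caller totals
 * are printed at exit by fio_dump_gtod().
 */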
static struct flist_head hash[HASH_SIZE];
static int gtod_inited;

struct gtod_log {
	struct flist_head list;
	void *caller;
	unsigned long calls;
};

static struct gtod_log *find_hash(void *caller)
{
	unsigned long h = hash_ptr(caller, HASH_BITS);
	struct flist_head *entry;

	flist_for_each(entry, &hash[h]) {
		struct gtod_log *log = flist_entry(entry, struct gtod_log,
								list);

		if (log->caller == caller)
			return log;
	}

	return NULL;
}

static struct gtod_log *find_log(void *caller)
{
	struct gtod_log *log = find_hash(caller);

	if (!log) {
		unsigned long h;

		log = malloc(sizeof(*log));
		INIT_FLIST_HEAD(&log->list);
		log->caller = caller;
		log->calls = 0;

		h = hash_ptr(caller, HASH_BITS);
		flist_add_tail(&log->list, &hash[h]);
	}

	return log;
}

static void gtod_log_caller(void *caller)
{
	if (gtod_inited) {
		struct gtod_log *log = find_log(caller);

		log->calls++;
	}
}

static void fio_exit fio_dump_gtod(void)
{
	unsigned long total_calls = 0;
	int i;

	for (i = 0; i < HASH_SIZE; i++) {
		struct flist_head *entry;
		struct gtod_log *log;

		flist_for_each(entry, &hash[i]) {
			log = flist_entry(entry, struct gtod_log, list);

			printf("function %p, calls %lu\n", log->caller,
							log->calls);
			total_calls += log->calls;
		}
	}

	printf("Total %lu gettimeofday\n", total_calls);
}

static void fio_init gtod_init(void)
{
	int i;

	for (i = 0; i < HASH_SIZE; i++)
		INIT_FLIST_HEAD(&hash[i]);

	gtod_inited = 1;
}

#endif /* FIO_DEBUG_TIME */

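/*
 * Prefer CLOCK_MONOTONIC where available: unlike CLOCK_REALTIME, it cannot
 * jump backwards when the wall clock is stepped.
 */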
static int fill_clock_gettime(struct timespec *ts)
{
#ifdef FIO_HAVE_CLOCK_MONOTONIC
	return clock_gettime(CLOCK_MONOTONIC, ts);
#else
	return clock_gettime(CLOCK_REALTIME, ts);
#endif
}

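/*
 * Fill *tp from the configured clock source. The caller pointer is only
 * used for call-site accounting when FIO_DEBUG_TIME is enabled.
 */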
#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timeval *tp, void *caller)
#else
void fio_gettime(struct timeval *tp, void fio_unused *caller)
#endif
{
	struct tv_valid *tv;

#ifdef FIO_DEBUG_TIME
	if (!caller)
		caller = __builtin_return_address(0);

	gtod_log_caller(caller);
#endif
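	/*
	 * If a dedicated gettimeofday offload thread is maintaining the
	 * time (the gtod_cpu option), just copy its cached value.
	 */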
	if (fio_tv) {
		memcpy(tp, fio_tv, sizeof(*tp));
		return;
	}

	tv = pthread_getspecific(tv_tls_key);

	switch (fio_clock_source) {
	case CS_GTOD:
		gettimeofday(tp, NULL);
		break;
	case CS_CGETTIME: {
		struct timespec ts;

		if (fill_clock_gettime(&ts) < 0) {
			log_err("fio: clock_gettime fails\n");
			assert(0);
		}

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		break;
	}
#ifdef ARCH_HAVE_CPU_CLOCK
	case CS_CPUCLOCK: {
		unsigned long long usecs, t;

		t = get_cpu_clock();
		if (tv && t < tv->last_cycles) {
			dprint(FD_TIME, "CPU clock going back in time\n");
			t = tv->last_cycles;
		} else if (tv)
			tv->last_cycles = t;

		usecs = t / cycles_per_usec;
		tp->tv_sec = usecs / 1000000;
		tp->tv_usec = usecs % 1000000;
		break;
	}
#endif
	default:
		log_err("fio: invalid clock source %d\n", fio_clock_source);
		break;
	}

	/*
	 * If Linux is using the tsc clock on non-synced processors,
	 * sometimes time can appear to drift backwards. Fix that up.
	 */
	if (tv) {
		if (tv->last_tv_valid) {
			if (tp->tv_sec < tv->last_tv.tv_sec)
				tp->tv_sec = tv->last_tv.tv_sec;
			else if (tv->last_tv.tv_sec == tp->tv_sec &&
				 tp->tv_usec < tv->last_tv.tv_usec)
				tp->tv_usec = tv->last_tv.tv_usec;
		}
		tv->last_tv_valid = 1;
		memcpy(&tv->last_tv, tp, sizeof(*tp));
	}
}

#ifdef ARCH_HAVE_CPU_CLOCK
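/*
 * Count CPU clock ticks over an interval of at least 1280 usec, timed with
 * clock_gettime(). Since 1280 = 10 * 128, the rounded shift by 7 yields
 * ticks per ~10 usec; calibrate_cpu_clock() divides the final average by
 * 10 to get ticks per usec.
 */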
static unsigned long get_cycles_per_usec(void)
{
	struct timespec ts;
	struct timeval s, e;
	unsigned long long c_s, c_e;

	fill_clock_gettime(&ts);
	s.tv_sec = ts.tv_sec;
	s.tv_usec = ts.tv_nsec / 1000;

	c_s = get_cpu_clock();
	do {
		unsigned long long elapsed;

		fill_clock_gettime(&ts);
		e.tv_sec = ts.tv_sec;
		e.tv_usec = ts.tv_nsec / 1000;

		elapsed = utime_since(&s, &e);
		if (elapsed >= 1280) {
			c_e = get_cpu_clock();
			break;
		}
	} while (1);

	return (c_e - c_s + 127) >> 7;
}

#define NR_TIME_ITERS	50

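/*
 * Sample get_cycles_per_usec() NR_TIME_ITERS times, computing the running
 * mean and standard deviation (Welford's method). Samples more than one
 * standard deviation from the mean, e.g. from preemption during a timing
 * run, are discarded before averaging.
 */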
static void calibrate_cpu_clock(void)
{
	double delta, mean, S;
	unsigned long avg, cycles[NR_TIME_ITERS];
	int i, samples;

	cycles[0] = get_cycles_per_usec();
	S = delta = mean = 0.0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		cycles[i] = get_cycles_per_usec();
		delta = cycles[i] - mean;
		if (delta) {
			mean += delta / (i + 1.0);
			S += delta * (cycles[i] - mean);
		}
	}

	S = sqrt(S / (NR_TIME_ITERS - 1.0));

	samples = avg = 0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		double this = cycles[i];

		if ((fmax(this, mean) - fmin(this, mean)) > S)
			continue;
		samples++;
		avg += this;
	}

	S /= (double) NR_TIME_ITERS;
	mean /= 10.0;

	for (i = 0; i < NR_TIME_ITERS; i++)
		dprint(FD_TIME, "cycles[%d]=%lu\n", i, cycles[i] / 10);

	/*
	 * Samples are in ticks per 10 usec; convert the average to per usec.
	 */
	avg /= samples;
	avg = (avg + 9) / 10;
	dprint(FD_TIME, "avg: %lu\n", avg);
	dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);

	cycles_per_usec = avg;
}
#else
static void calibrate_cpu_clock(void)
{
}
#endif

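/*
 * Called when a thread or process starts, to allocate its TLS clock state.
 */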
void fio_local_clock_init(int is_thread)
{
	struct tv_valid *t;

	t = calloc(1, sizeof(*t));
	if (pthread_setspecific(tv_tls_key, t))
		log_err("fio: can't set TLS key\n");
}

static void kill_tv_tls_key(void *data)
{
	free(data);
}

void fio_clock_init(void)
{
	if (fio_clock_source == fio_clock_source_inited)
		return;

	if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
		log_err("fio: can't create TLS key\n");

	fio_clock_source_inited = fio_clock_source;
	calibrate_cpu_clock();

	/*
	 * If the arch sets tsc_reliable != 0, then it must be good enough
	 * to use as THE clock source. For x86 CPUs, this means the TSC
	 * runs at a constant rate and is synced across CPU cores.
	 */
	if (tsc_reliable) {
		if (!fio_clock_source_set)
			fio_clock_source = CS_CPUCLOCK;
	} else if (fio_clock_source == CS_CPUCLOCK)
		log_info("fio: clocksource=cpu may not be reliable\n");
}

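/*
 * Microseconds between two timevals, clamped to 0 if *e appears to precede
 * *s. Typical usage (do_work() is just a stand-in for the timed section):
 *
 *	struct timeval start;
 *
 *	fio_gettime(&start, NULL);
 *	do_work();
 *	usec = utime_since_now(&start);
 */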
unsigned long long utime_since(struct timeval *s, struct timeval *e)
{
	long sec, usec;
	unsigned long long ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	ret = sec * 1000000ULL + usec;

	return ret;
}

unsigned long long utime_since_now(struct timeval *s)
{
	struct timeval t;

	fio_gettime(&t, NULL);
	return utime_since(s, &t);
}

unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
	long sec, usec, ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	sec *= 1000UL;
	usec /= 1000UL;
	ret = sec + usec;

	return ret;
}

unsigned long mtime_since_now(struct timeval *s)
{
	struct timeval t;
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
	return mtime_since(s, &t);
}

unsigned long time_since_now(struct timeval *s)
{
	return mtime_since_now(s) / 1000;
}

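/*
 * Monotonicity test for the CPU clock: each thread pins itself to one CPU
 * and records (sequence, TSC) pairs against a shared atomic counter. Once
 * all samples are sorted by sequence number, the TSC values must never go
 * backwards if the TSCs are synced across cores.
 */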
#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK)

#define CLOCK_ENTRIES	100000

struct clock_entry {
	unsigned long seq;
	unsigned long tsc;
	unsigned long cpu;
};

struct clock_thread {
	pthread_t thread;
	int cpu;
	pthread_mutex_t lock;
	pthread_mutex_t started;
	uint64_t *seq;
	struct clock_entry *entries;
};

/*
 * Atomically increment *seq, returning the post-increment value.
 */
static inline uint64_t atomic64_inc_return(uint64_t *seq)
{
	return 1 + __sync_fetch_and_add(seq, 1);
}

static void *clock_thread_fn(void *data)
{
	struct clock_thread *t = data;
	struct clock_entry *c;
	os_cpu_mask_t cpu_mask;
	int i;

	memset(&cpu_mask, 0, sizeof(cpu_mask));
	fio_cpu_set(&cpu_mask, t->cpu);

	if (fio_setaffinity(gettid(), cpu_mask) == -1) {
		log_err("clock setaffinity failed\n");
		return (void *) 1;
	}

	/*
	 * Block until the main thread releases t->lock, then flag via
	 * t->started that this thread is running.
	 */
	pthread_mutex_lock(&t->lock);
	pthread_mutex_unlock(&t->started);

	c = &t->entries[0];
	for (i = 0; i < CLOCK_ENTRIES; i++, c++) {
		uint64_t seq, tsc;

		c->cpu = t->cpu;
		/*
		 * Retry if another thread bumped the sequence between our
		 * increment and the TSC read, so each seq pairs with its tsc.
		 */
		do {
			seq = atomic64_inc_return(t->seq);
			tsc = get_cpu_clock();
		} while (seq != *t->seq);

		c->seq = seq;
		c->tsc = tsc;
	}

	log_info("cs: cpu%3d: %lu clocks seen\n", t->cpu,
			t->entries[CLOCK_ENTRIES - 1].tsc - t->entries[0].tsc);
	return NULL;
}

static int clock_cmp(const void *p1, const void *p2)
{
	const struct clock_entry *c1 = p1;
	const struct clock_entry *c2 = p2;

	if (c1->seq == c2->seq)
		log_err("cs: bug in atomic sequence!\n");

	return c1->seq - c2->seq;
}

int fio_monotonic_clocktest(void)
{
	struct clock_thread *threads;
	unsigned int nr_cpus = cpus_online();
	struct clock_entry *entries, *prev;
	unsigned long tentries, failed;
	uint64_t seq = 0;
	int i;

	fio_debug |= 1U << FD_TIME;
	calibrate_cpu_clock();
	fio_debug &= ~(1U << FD_TIME);

	threads = malloc(nr_cpus * sizeof(struct clock_thread));
	tentries = CLOCK_ENTRIES * nr_cpus;
	entries = malloc(tentries * sizeof(struct clock_entry));

	log_info("cs: Testing %u CPUs\n", nr_cpus);

	/*
	 * Start every thread with its lock held, so none of them begin
	 * sampling before all are pinned and ready.
	 */
	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];

		t->cpu = i;
		t->seq = &seq;
		t->entries = &entries[i * CLOCK_ENTRIES];
		pthread_mutex_init(&t->lock, NULL);
		pthread_mutex_init(&t->started, NULL);
		pthread_mutex_lock(&t->lock);
		pthread_create(&t->thread, NULL, clock_thread_fn, t);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];

		pthread_mutex_lock(&t->started);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];

		pthread_mutex_unlock(&t->lock);
	}

	for (failed = i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];
		void *ret;

		pthread_join(t->thread, &ret);
		if (ret)
			failed++;
	}
	free(threads);

	if (failed) {
		log_err("Clocksource test: %lu threads failed\n", failed);
		goto err;
	}

	/*
	 * Sort all samples by sequence number; a synced clock must then
	 * yield non-decreasing TSC values across the whole array.
	 */
	qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);

	for (failed = i = 0, prev = NULL; i < tentries; i++) {
		struct clock_entry *this = &entries[i];

		if (!i) {
			prev = this;
			continue;
		}

		if (prev->tsc > this->tsc) {
			uint64_t diff = prev->tsc - this->tsc;

			log_info("cs: CPU clock mismatch (diff=%lu):\n", diff);
			log_info("\t CPU%3lu: TSC=%lu, SEQ=%lu\n", prev->cpu, prev->tsc, prev->seq);
			log_info("\t CPU%3lu: TSC=%lu, SEQ=%lu\n", this->cpu, this->tsc, this->seq);
			failed++;
		}

		prev = this;
	}

	if (failed)
		log_info("cs: Failed: %lu\n", failed);
	else
		log_info("cs: Pass!\n");

err:
	free(entries);
	return !!failed;
}

#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) */

int fio_monotonic_clocktest(void)
{
	log_info("cs: current platform does not support CPU clocks\n");
	return 0;
}

#endif