[fio.git] / gettime.c
/*
 * Clock functions
 */

#include <unistd.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>

#include "fio.h"
#include "smalloc.h"

#include "hash.h"
#include "os/os.h"

#if defined(ARCH_HAVE_CPU_CLOCK)
#ifndef ARCH_CPU_CLOCK_CYCLES_PER_USEC
static unsigned long cycles_per_usec;
static unsigned long inv_cycles_per_nsec;
static uint64_t max_cycles_for_mult;
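/*
 * Scaled-integer factor for converting CPU cycles to nsec without
 * floating point math: inv_cycles_per_nsec is computed as
 * NSEC_INV_FACTOR * 1000 / cycles_per_usec, so a delta of t cycles
 * is t * inv_cycles_per_nsec / NSEC_INV_FACTOR nsec.
 */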
#define NSEC_INV_FACTOR 4096
#endif
#ifdef ARCH_CPU_CLOCK_WRAPS
static unsigned long long cycles_start, cycles_wrap;
#endif
#endif
int tsc_reliable = 0;

struct tv_valid {
        uint64_t last_cycles;
        int last_tv_valid;
        int warned;
};
#ifdef ARCH_HAVE_CPU_CLOCK
#ifdef CONFIG_TLS_THREAD
static __thread struct tv_valid static_tv_valid;
#else
static pthread_key_t tv_tls_key;
#endif
#endif

enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
static enum fio_cs fio_clock_source_inited = CS_INVAL;

#ifdef FIO_DEBUG_TIME

#define HASH_BITS       8
#define HASH_SIZE       (1 << HASH_BITS)

static struct flist_head hash[HASH_SIZE];
static int gtod_inited;

struct gtod_log {
        struct flist_head list;
        void *caller;
        unsigned long calls;
};

static struct gtod_log *find_hash(void *caller)
{
        unsigned long h = hash_ptr(caller, HASH_BITS);
        struct flist_head *entry;

        flist_for_each(entry, &hash[h]) {
                struct gtod_log *log = flist_entry(entry, struct gtod_log,
                                                                        list);

                if (log->caller == caller)
                        return log;
        }

        return NULL;
}

static void inc_caller(void *caller)
{
        struct gtod_log *log = find_hash(caller);

        if (!log) {
                unsigned long h;

                log = malloc(sizeof(*log));
                INIT_FLIST_HEAD(&log->list);
                log->caller = caller;
                log->calls = 0;

                h = hash_ptr(caller, HASH_BITS);
                flist_add_tail(&log->list, &hash[h]);
        }

        log->calls++;
}

static void gtod_log_caller(void *caller)
{
        if (gtod_inited)
                inc_caller(caller);
}

static void fio_exit fio_dump_gtod(void)
{
        unsigned long total_calls = 0;
        int i;

        for (i = 0; i < HASH_SIZE; i++) {
                struct flist_head *entry;
                struct gtod_log *log;

                flist_for_each(entry, &hash[i]) {
                        log = flist_entry(entry, struct gtod_log, list);

                        printf("function %p, calls %lu\n", log->caller,
                                                                log->calls);
                        total_calls += log->calls;
                }
        }

        printf("Total %lu gettimeofday calls\n", total_calls);
}

static void fio_init gtod_init(void)
{
        int i;

        for (i = 0; i < HASH_SIZE; i++)
                INIT_FLIST_HEAD(&hash[i]);

        gtod_inited = 1;
}

#endif /* FIO_DEBUG_TIME */

#ifdef CONFIG_CLOCK_GETTIME
static int fill_clock_gettime(struct timespec *ts)
{
#if defined(CONFIG_CLOCK_MONOTONIC_RAW)
        return clock_gettime(CLOCK_MONOTONIC_RAW, ts);
#elif defined(CONFIG_CLOCK_MONOTONIC)
        return clock_gettime(CLOCK_MONOTONIC, ts);
#else
        return clock_gettime(CLOCK_REALTIME, ts);
#endif
}
#endif

static void __fio_gettime(struct timespec *tp)
{
        switch (fio_clock_source) {
#ifdef CONFIG_GETTIMEOFDAY
        case CS_GTOD: {
                struct timeval tv;
                gettimeofday(&tv, NULL);

                tp->tv_sec = tv.tv_sec;
                tp->tv_nsec = tv.tv_usec * 1000;
                break;
                }
#endif
#ifdef CONFIG_CLOCK_GETTIME
        case CS_CGETTIME: {
                if (fill_clock_gettime(tp) < 0) {
                        log_err("fio: clock_gettime failed\n");
                        assert(0);
                }
                break;
                }
#endif
#ifdef ARCH_HAVE_CPU_CLOCK
        case CS_CPUCLOCK: {
                uint64_t nsecs, t;
                struct tv_valid *tv;

#ifdef CONFIG_TLS_THREAD
                tv = &static_tv_valid;
#else
                tv = pthread_getspecific(tv_tls_key);
#endif

                t = get_cpu_clock();
#ifdef ARCH_CPU_CLOCK_WRAPS
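                /*
                 * Short (e.g. 32-bit) cycle counters can wrap. Re-base
                 * against the value sampled at calibration time; the
                 * unsigned subtraction below handles a single wrap, but
                 * a second wrap cannot be told apart from the first, so
                 * warn once and carry on.
                 */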
                if (t < cycles_start && !cycles_wrap)
                        cycles_wrap = 1;
                else if (cycles_wrap && t >= cycles_start && !tv->warned) {
                        log_err("fio: double CPU clock wrap\n");
                        tv->warned = 1;
                }

                t -= cycles_start;
#endif
                tv->last_cycles = t;
                tv->last_tv_valid = 1;
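                /*
                 * Convert cycles to nsec. For small deltas, multiply by
                 * the precomputed scaled inverse and shift the factor
                 * back out. For deltas large enough that the multiply
                 * would overflow 64 bits (t >= max_cycles_for_mult),
                 * divide first, at a small cost in precision.
                 */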
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
                nsecs = t * 1000 / ARCH_CPU_CLOCK_CYCLES_PER_USEC;
#else
                if (t < max_cycles_for_mult)
                        nsecs = (t * inv_cycles_per_nsec) / NSEC_INV_FACTOR;
                else
                        nsecs = (t / NSEC_INV_FACTOR) * inv_cycles_per_nsec;
#endif
                tp->tv_sec = nsecs / 1000000000ULL;
                tp->tv_nsec = nsecs % 1000000000ULL;
                break;
                }
#endif
        default:
                log_err("fio: invalid clock source %d\n", fio_clock_source);
                break;
        }
}

#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timespec *tp, void *caller)
#else
void fio_gettime(struct timespec *tp, void fio_unused *caller)
#endif
{
#ifdef FIO_DEBUG_TIME
        if (!caller)
                caller = __builtin_return_address(0);

        gtod_log_caller(caller);
#endif
        if (fio_unlikely(fio_gettime_offload(tp)))
                return;

        __fio_gettime(tp);
}

#if defined(ARCH_HAVE_CPU_CLOCK) && !defined(ARCH_CPU_CLOCK_CYCLES_PER_USEC)
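/*
 * Measure how many CPU clock cycles elapse per usec of wall time, using
 * the best portable clock available as the reference. The busy-wait
 * loop polls until at least 1280 usec of reference time has elapsed,
 * then divides the observed cycle delta by the elapsed usec.
 */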
static unsigned long get_cycles_per_usec(void)
{
        struct timespec s, e;
        uint64_t c_s, c_e;
        enum fio_cs old_cs = fio_clock_source;
        uint64_t elapsed;

#ifdef CONFIG_CLOCK_GETTIME
        fio_clock_source = CS_CGETTIME;
#else
        fio_clock_source = CS_GTOD;
#endif
        __fio_gettime(&s);

        c_s = get_cpu_clock();
        do {
                __fio_gettime(&e);

                elapsed = utime_since(&s, &e);
                if (elapsed >= 1280) {
                        c_e = get_cpu_clock();
                        break;
                }
        } while (1);

        fio_clock_source = old_cs;
        return (c_e - c_s) / elapsed;
}

#define NR_TIME_ITERS   50

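/*
 * Sample get_cycles_per_usec() NR_TIME_ITERS times, computing a running
 * mean and variance with Welford's online algorithm, then average only
 * the samples that fall within one standard deviation of the mean to
 * discard outliers (e.g. from preemption during a sample). The initial
 * throwaway call primes the measurement path; the loop overwrites
 * cycles[0].
 */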
static int calibrate_cpu_clock(void)
{
        double delta, mean, S;
        uint64_t minc, maxc, avg, cycles[NR_TIME_ITERS];
        int i, samples;

        cycles[0] = get_cycles_per_usec();
        S = delta = mean = 0.0;
        for (i = 0; i < NR_TIME_ITERS; i++) {
                cycles[i] = get_cycles_per_usec();
                delta = cycles[i] - mean;
                if (delta) {
                        mean += delta / (i + 1.0);
                        S += delta * (cycles[i] - mean);
                }
        }

        /*
         * The most common platform clock breakage is returning zero
         * indefinitely. Check for that and return failure.
         */
        if (!cycles[0] && !cycles[NR_TIME_ITERS - 1])
                return 1;

        S = sqrt(S / (NR_TIME_ITERS - 1.0));

        minc = -1ULL;
        maxc = samples = avg = 0;
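        /*
         * Average only the samples within one standard deviation of
         * the mean; minc/maxc track the full range for debug output.
         */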
        for (i = 0; i < NR_TIME_ITERS; i++) {
                double this = cycles[i];

                minc = min(cycles[i], minc);
                maxc = max(cycles[i], maxc);

                if ((fmax(this, mean) - fmin(this, mean)) > S)
                        continue;
                samples++;
                avg += this;
        }

        S /= (double) NR_TIME_ITERS;

        for (i = 0; i < NR_TIME_ITERS; i++)
                dprint(FD_TIME, "cycles[%d]=%llu\n", i, (unsigned long long) cycles[i]);

        avg /= samples;
        dprint(FD_TIME, "avg: %llu\n", (unsigned long long) avg);
        dprint(FD_TIME, "min=%llu, max=%llu, mean=%f, S=%f\n",
                        (unsigned long long) minc,
                        (unsigned long long) maxc, mean, S);

        cycles_per_usec = avg;
        inv_cycles_per_nsec = NSEC_INV_FACTOR * 1000 / cycles_per_usec;
        max_cycles_for_mult = ~0ULL / inv_cycles_per_nsec;
        dprint(FD_TIME, "inv_cycles_per_nsec=%lu\n", inv_cycles_per_nsec);
#ifdef ARCH_CPU_CLOCK_WRAPS
        cycles_start = get_cpu_clock();
        dprint(FD_TIME, "cycles_start=%llu\n", cycles_start);
#endif
        return 0;
}
#else
static int calibrate_cpu_clock(void)
{
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
        return 0;
#else
        return 1;
#endif
}
#endif // ARCH_HAVE_CPU_CLOCK

#ifndef CONFIG_TLS_THREAD
void fio_local_clock_init(int is_thread)
{
        struct tv_valid *t;

        t = calloc(1, sizeof(*t));
        if (pthread_setspecific(tv_tls_key, t)) {
                log_err("fio: can't set TLS key\n");
                assert(0);
        }
}

static void kill_tv_tls_key(void *data)
{
        free(data);
}
#else
void fio_local_clock_init(int is_thread)
{
}
#endif

void fio_clock_init(void)
{
        if (fio_clock_source == fio_clock_source_inited)
                return;

#ifndef CONFIG_TLS_THREAD
        if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
                log_err("fio: can't create TLS key\n");
#endif

        fio_clock_source_inited = fio_clock_source;

        if (calibrate_cpu_clock())
                tsc_reliable = 0;

        /*
         * If the arch sets tsc_reliable != 0, then it must be good enough
         * to use as THE clock source. For x86 CPUs, this means the TSC
         * runs at a constant rate and is synced across CPU cores.
         */
        if (tsc_reliable) {
                if (!fio_clock_source_set && !fio_monotonic_clocktest(0))
                        fio_clock_source = CS_CPUCLOCK;
        } else if (fio_clock_source == CS_CPUCLOCK)
                log_info("fio: clocksource=cpu may not be reliable\n");
}

uint64_t utime_since(const struct timespec *s, const struct timespec *e)
{
        int64_t sec, usec;

        sec = e->tv_sec - s->tv_sec;
        usec = (e->tv_nsec - s->tv_nsec) / 1000;
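        /*
         * Borrow one second if the sub-second delta went negative.
         */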
        if (sec > 0 && usec < 0) {
                sec--;
                usec += 1000000;
        }

        /*
         * time warp bug on some kernels?
         */
        if (sec < 0 || (sec == 0 && usec < 0))
                return 0;

        return usec + (sec * 1000000);
}

uint64_t utime_since_now(const struct timespec *s)
{
        struct timespec t;
#ifdef FIO_DEBUG_TIME
        void *p = __builtin_return_address(0);

        fio_gettime(&t, p);
#else
        fio_gettime(&t, NULL);
#endif

        return utime_since(s, &t);
}

uint64_t mtime_since_tv(const struct timeval *s, const struct timeval *e)
{
        int64_t sec, usec;

        sec = e->tv_sec - s->tv_sec;
        usec = (e->tv_usec - s->tv_usec);
        if (sec > 0 && usec < 0) {
                sec--;
                usec += 1000000;
        }

        if (sec < 0 || (sec == 0 && usec < 0))
                return 0;

        sec *= 1000;
        usec /= 1000;
        return sec + usec;
}

uint64_t mtime_since_now(const struct timespec *s)
{
        struct timespec t;
#ifdef FIO_DEBUG_TIME
        void *p = __builtin_return_address(0);

        fio_gettime(&t, p);
#else
        fio_gettime(&t, NULL);
#endif

        return mtime_since(s, &t);
}

uint64_t mtime_since(const struct timespec *s, const struct timespec *e)
{
        int64_t sec, usec;

        sec = e->tv_sec - s->tv_sec;
        usec = (e->tv_nsec - s->tv_nsec) / 1000;
        if (sec > 0 && usec < 0) {
                sec--;
                usec += 1000000;
        }

        if (sec < 0 || (sec == 0 && usec < 0))
                return 0;

        sec *= 1000;
        usec /= 1000;
        return sec + usec;
}

uint64_t time_since_now(const struct timespec *s)
{
        return mtime_since_now(s) / 1000;
}

#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
    defined(CONFIG_SFAA)

#define CLOCK_ENTRIES_DEBUG     100000
#define CLOCK_ENTRIES_TEST      10000

struct clock_entry {
        uint32_t seq;
        uint32_t cpu;
        uint64_t tsc;
};

struct clock_thread {
        pthread_t thread;
        int cpu;
        int debug;
        pthread_mutex_t lock;
        pthread_mutex_t started;
        unsigned long nr_entries;
        uint32_t *seq;
        struct clock_entry *entries;
};

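/*
 * Atomically bump *seq and return the post-increment value;
 * __sync_fetch_and_add() returns the old value, hence the +1.
 */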
static inline uint32_t atomic32_inc_return(uint32_t *seq)
{
        return 1 + __sync_fetch_and_add(seq, 1);
}

static void *clock_thread_fn(void *data)
{
        struct clock_thread *t = data;
        struct clock_entry *c;
        os_cpu_mask_t cpu_mask;
        uint32_t last_seq;
        unsigned long long first;
        int i;

        if (fio_cpuset_init(&cpu_mask)) {
                int __err = errno;

                log_err("clock cpuset init failed: %s\n", strerror(__err));
                goto err_out;
        }

        fio_cpu_set(&cpu_mask, t->cpu);

        if (fio_setaffinity(gettid(), cpu_mask) == -1) {
                int __err = errno;

                log_err("clock setaffinity failed: %s\n", strerror(__err));
                goto err;
        }

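        /*
         * The main thread holds ->lock until every worker has been
         * created, so blocking on it here lines all workers up to
         * start sampling at roughly the same time; unlocking
         * ->started is meant to signal the main thread that this
         * worker is pinned and running.
         */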
        pthread_mutex_lock(&t->lock);
        pthread_mutex_unlock(&t->started);

        first = get_cpu_clock();
        last_seq = 0;
        c = &t->entries[0];
        for (i = 0; i < t->nr_entries; i++, c++) {
                uint32_t seq;
                uint64_t tsc;

                c->cpu = t->cpu;
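                /*
                 * Pair a globally ordered sequence number with a TSC
                 * read. If another thread bumped the sequence between
                 * our increment and our clock read, the pairing is
                 * stale, so retry until the two are adjacent. The
                 * seq < last_seq check bails out if the 32-bit
                 * sequence counter wraps.
                 */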
                do {
                        seq = atomic32_inc_return(t->seq);
                        if (seq < last_seq)
                                break;
                        tsc = get_cpu_clock();
                } while (seq != *t->seq);

                last_seq = seq;
                c->seq = seq;
                c->tsc = tsc;
        }

        if (t->debug) {
                unsigned long long clocks;

                clocks = t->entries[i - 1].tsc - t->entries[0].tsc;
                log_info("cs: cpu%3d: %llu clocks seen, first %llu\n", t->cpu,
                                                        clocks, first);
        }

        /*
         * The most common platform clock breakage is returning zero
         * indefinitely. Check for that and return failure.
         */
        if (!t->entries[i - 1].tsc && !t->entries[0].tsc)
                goto err;

        fio_cpuset_exit(&cpu_mask);
        return NULL;
err:
        fio_cpuset_exit(&cpu_mask);
err_out:
        return (void *) 1;
}

static int clock_cmp(const void *p1, const void *p2)
{
        const struct clock_entry *c1 = p1;
        const struct clock_entry *c2 = p2;

        if (c1->seq == c2->seq)
                log_err("cs: bug in atomic sequence!\n");

        return c1->seq - c2->seq;
}

int fio_monotonic_clocktest(int debug)
{
        struct clock_thread *cthreads;
        unsigned int nr_cpus = cpus_online();
        struct clock_entry *entries;
        unsigned long nr_entries, tentries, failed = 0;
        struct clock_entry *prev, *this;
        uint32_t seq = 0;
        unsigned int i;

        if (debug) {
                log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");

#ifdef FIO_INC_DEBUG
                fio_debug |= 1U << FD_TIME;
#endif
                nr_entries = CLOCK_ENTRIES_DEBUG;
        } else
                nr_entries = CLOCK_ENTRIES_TEST;

        calibrate_cpu_clock();

        if (debug) {
#ifdef FIO_INC_DEBUG
                fio_debug &= ~(1U << FD_TIME);
#endif
        }

        cthreads = malloc(nr_cpus * sizeof(struct clock_thread));
        tentries = nr_entries * nr_cpus;
        entries = malloc(tentries * sizeof(struct clock_entry));

        if (debug)
                log_info("cs: Testing %u CPUs\n", nr_cpus);

        for (i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &cthreads[i];

                t->cpu = i;
                t->debug = debug;
                t->seq = &seq;
                t->nr_entries = nr_entries;
                t->entries = &entries[i * nr_entries];
                pthread_mutex_init(&t->lock, NULL);
                pthread_mutex_init(&t->started, NULL);
                pthread_mutex_lock(&t->lock);
                if (pthread_create(&t->thread, NULL, clock_thread_fn, t)) {
                        failed++;
                        nr_cpus = i;
                        break;
                }
        }

        for (i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &cthreads[i];

                pthread_mutex_lock(&t->started);
        }

        for (i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &cthreads[i];

                pthread_mutex_unlock(&t->lock);
        }

        for (i = 0; i < nr_cpus; i++) {
                struct clock_thread *t = &cthreads[i];
                void *ret;

                pthread_join(t->thread, &ret);
                if (ret)
                        failed++;
        }
        free(cthreads);

        if (failed) {
                if (debug)
                        log_err("Clocksource test: %lu threads failed\n", failed);
                goto err;
        }

        qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);

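        /*
         * Once sorted by sequence number, TSC values must be
         * non-decreasing if the clock is monotonic and synced across
         * CPUs; any backwards step between adjacent entries is a
         * failure.
         */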
        /* silence silly gcc */
        prev = NULL;
        for (failed = i = 0; i < tentries; i++) {
                this = &entries[i];

                if (!i) {
                        prev = this;
                        continue;
                }

                if (prev->tsc > this->tsc) {
                        uint64_t diff = prev->tsc - this->tsc;

                        if (!debug) {
                                failed++;
                                break;
                        }

                        log_info("cs: CPU clock mismatch (diff=%llu):\n",
                                                (unsigned long long) diff);
                        log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", prev->cpu, (unsigned long long) prev->tsc, prev->seq);
                        log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", this->cpu, (unsigned long long) this->tsc, this->seq);
                        failed++;
                }

                prev = this;
        }

        if (debug) {
                if (failed)
                        log_info("cs: Failed: %lu\n", failed);
                else
                        log_info("cs: Pass!\n");
        }
err:
        free(entries);
        return !!failed;
}

#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && defined(CONFIG_SFAA) */

int fio_monotonic_clocktest(int debug)
{
        if (debug)
                log_info("cs: current platform does not support CPU clocks\n");
        return 1;
}

#endif