/*
 * Clock functions
 */

#include <unistd.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>

#include "fio.h"
#include "smalloc.h"

#include "hash.h"
#include "os/os.h"

#if defined(ARCH_HAVE_CPU_CLOCK)
#ifndef ARCH_CPU_CLOCK_CYCLES_PER_USEC
static unsigned long cycles_per_msec;
static unsigned long long cycles_start;
static unsigned long long clock_mult;
static unsigned long long max_cycles_mask;
static unsigned long long nsecs_for_max_cycles;
static unsigned int clock_shift;
static unsigned int max_cycles_shift;
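/* Longest interval, in seconds, that the cycles-to-nsecs conversion must cover */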
#define MAX_CLOCK_SEC	(60*60)
#endif
#ifdef ARCH_CPU_CLOCK_WRAPS
static unsigned int cycles_wrap;
#endif
#endif

bool tsc_reliable = false;

struct tv_valid {
	int warned;
};

#ifdef ARCH_HAVE_CPU_CLOCK
#ifdef CONFIG_TLS_THREAD
static __thread struct tv_valid static_tv_valid;
#else
static pthread_key_t tv_tls_key;
#endif
#endif

enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
static enum fio_cs fio_clock_source_inited = CS_INVAL;

#ifdef FIO_DEBUG_TIME

#define HASH_BITS	8
#define HASH_SIZE	(1 << HASH_BITS)

static struct flist_head hash[HASH_SIZE];
static int gtod_inited;

struct gtod_log {
	struct flist_head list;
	void *caller;
	unsigned long calls;
};

static struct gtod_log *find_hash(void *caller)
{
	unsigned long h = hash_ptr(caller, HASH_BITS);
	struct flist_head *entry;

	flist_for_each(entry, &hash[h]) {
		struct gtod_log *log = flist_entry(entry, struct gtod_log,
							list);

		if (log->caller == caller)
			return log;
	}

	return NULL;
}

static void inc_caller(void *caller)
{
	struct gtod_log *log = find_hash(caller);

	if (!log) {
		unsigned long h;

		log = malloc(sizeof(*log));
		INIT_FLIST_HEAD(&log->list);
		log->caller = caller;
		log->calls = 0;

		h = hash_ptr(caller, HASH_BITS);
		flist_add_tail(&log->list, &hash[h]);
	}

	log->calls++;
}

static void gtod_log_caller(void *caller)
{
	if (gtod_inited)
		inc_caller(caller);
}

static void fio_exit fio_dump_gtod(void)
{
	unsigned long total_calls = 0;
	int i;

	for (i = 0; i < HASH_SIZE; i++) {
		struct flist_head *entry;
		struct gtod_log *log;

		flist_for_each(entry, &hash[i]) {
			log = flist_entry(entry, struct gtod_log, list);

			printf("function %p, calls %lu\n", log->caller,
							log->calls);
			total_calls += log->calls;
		}
	}

	printf("Total %lu gettimeofday\n", total_calls);
}

static void fio_init gtod_init(void)
{
	int i;

	for (i = 0; i < HASH_SIZE; i++)
		INIT_FLIST_HEAD(&hash[i]);

	gtod_inited = 1;
}

#endif /* FIO_DEBUG_TIME */

#ifdef CONFIG_CLOCK_GETTIME
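/*
 * Prefer CLOCK_MONOTONIC_RAW where available, since unlike CLOCK_MONOTONIC
 * it is not subject to NTP rate adjustment. CLOCK_REALTIME is the last
 * resort, as it can jump backwards when the wall clock is set.
 */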
static int fill_clock_gettime(struct timespec *ts)
{
#if defined(CONFIG_CLOCK_MONOTONIC_RAW)
	return clock_gettime(CLOCK_MONOTONIC_RAW, ts);
#elif defined(CONFIG_CLOCK_MONOTONIC)
	return clock_gettime(CLOCK_MONOTONIC, ts);
#else
	return clock_gettime(CLOCK_REALTIME, ts);
#endif
}
#endif

static void __fio_gettime(struct timespec *tp)
{
	switch (fio_clock_source) {
#ifdef CONFIG_GETTIMEOFDAY
	case CS_GTOD: {
		struct timeval tv;

		gettimeofday(&tv, NULL);
		tp->tv_sec = tv.tv_sec;
		tp->tv_nsec = tv.tv_usec * 1000;
		break;
	}
#endif
#ifdef CONFIG_CLOCK_GETTIME
	case CS_CGETTIME: {
		if (fill_clock_gettime(tp) < 0) {
			log_err("fio: clock_gettime fails\n");
			assert(0);
		}
		break;
	}
#endif
#ifdef ARCH_HAVE_CPU_CLOCK
	case CS_CPUCLOCK: {
		uint64_t nsecs, t, multiples;
		struct tv_valid *tv;

#ifdef CONFIG_TLS_THREAD
		tv = &static_tv_valid;
#else
		tv = pthread_getspecific(tv_tls_key);
#endif

		t = get_cpu_clock();
#ifdef ARCH_CPU_CLOCK_WRAPS
		if (t < cycles_start && !cycles_wrap)
			cycles_wrap = 1;
		else if (cycles_wrap && t >= cycles_start && !tv->warned) {
			log_err("fio: double CPU clock wrap\n");
			tv->warned = 1;
		}
#endif
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
		nsecs = t / ARCH_CPU_CLOCK_CYCLES_PER_USEC * 1000;
#else
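		/*
		 * Two-stage conversion, set up in calibrate_cpu_clock():
		 * whole 2^max_cycles_shift cycle chunks use the precomputed
		 * nsecs_for_max_cycles, and only the remainder goes through
		 * the fixed-point multiply, so the product fits in 64 bits.
		 */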
		t -= cycles_start;
		multiples = t >> max_cycles_shift;
		nsecs = multiples * nsecs_for_max_cycles;
		nsecs += ((t & max_cycles_mask) * clock_mult) >> clock_shift;
#endif
		tp->tv_sec = nsecs / 1000000000ULL;
		tp->tv_nsec = nsecs % 1000000000ULL;
		break;
	}
#endif
	default:
		log_err("fio: invalid clock source %d\n", fio_clock_source);
		break;
	}
}

#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timespec *tp, void *caller)
#else
void fio_gettime(struct timespec *tp, void fio_unused *caller)
#endif
{
#ifdef FIO_DEBUG_TIME
	if (!caller)
		caller = __builtin_return_address(0);

	gtod_log_caller(caller);
#endif
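	/*
	 * If a dedicated gettimeofday offload thread is running (the
	 * gtod_cpu option), it fills in the timestamp for us.
	 */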
	if (fio_unlikely(fio_gettime_offload(tp)))
		return;

	__fio_gettime(tp);
}

#if defined(ARCH_HAVE_CPU_CLOCK) && !defined(ARCH_CPU_CLOCK_CYCLES_PER_USEC)
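/*
 * Estimate the TSC rate: busy-wait until at least ~1.28 msec has passed on
 * the reference clock, then scale the observed cycle delta to cycles/msec.
 */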
static unsigned long get_cycles_per_msec(void)
{
	struct timespec s, e;
	uint64_t c_s, c_e;
	enum fio_cs old_cs = fio_clock_source;
	uint64_t elapsed;

#ifdef CONFIG_CLOCK_GETTIME
	fio_clock_source = CS_CGETTIME;
#else
	fio_clock_source = CS_GTOD;
#endif
	__fio_gettime(&s);

	c_s = get_cpu_clock();
	do {
		__fio_gettime(&e);

		elapsed = utime_since(&s, &e);
		if (elapsed >= 1280) {
			c_e = get_cpu_clock();
			break;
		}
	} while (1);

	fio_clock_source = old_cs;
	return (c_e - c_s) * 1000 / elapsed;
}

#define NR_TIME_ITERS	50

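/*
 * Calibrate the cycles-to-nsecs conversion: sample the TSC rate
 * NR_TIME_ITERS times, average the samples that fall within one standard
 * deviation of the mean, and derive a fixed-point multiplier/shift pair so
 * that nsecs = (cycles * clock_mult) >> clock_shift. As a purely
 * illustrative example, a hypothetical 2.5 GHz TSC (cycles_per_msec =
 * 2,500,000, i.e. 0.4 ns per cycle) ends up with
 * clock_mult / 2^clock_shift ~= 0.4.
 */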
static int calibrate_cpu_clock(void)
{
	double delta, mean, S;
	uint64_t minc, maxc, avg, cycles[NR_TIME_ITERS];
	int i, samples, sft = 0;
	unsigned long long tmp, max_ticks, max_mult;

	cycles[0] = get_cycles_per_msec();
	S = delta = mean = 0.0;
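	/*
	 * The call above effectively just warms the timing path; its result
	 * is overwritten by the first loop iteration. The loop keeps a
	 * running mean and sum of squared deviations (Welford's online
	 * algorithm).
	 */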
	for (i = 0; i < NR_TIME_ITERS; i++) {
		cycles[i] = get_cycles_per_msec();
		delta = cycles[i] - mean;
		if (delta) {
			mean += delta / (i + 1.0);
			S += delta * (cycles[i] - mean);
		}
	}

	/*
	 * The most common platform clock breakage is returning zero
	 * indefinitely. Check for that and return failure.
	 */
	if (!cycles[0] && !cycles[NR_TIME_ITERS - 1])
		return 1;

	S = sqrt(S / (NR_TIME_ITERS - 1.0));

	minc = -1ULL;
	maxc = samples = avg = 0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		double this = cycles[i];

		minc = min(cycles[i], minc);
		maxc = max(cycles[i], maxc);

		if ((fmax(this, mean) - fmin(this, mean)) > S)
			continue;
		samples++;
		avg += this;
	}

	S /= (double) NR_TIME_ITERS;

	for (i = 0; i < NR_TIME_ITERS; i++)
		dprint(FD_TIME, "cycles[%d]=%llu\n", i,
			(unsigned long long) cycles[i]);

	avg /= samples;
	cycles_per_msec = avg;
	dprint(FD_TIME, "avg: %llu\n", (unsigned long long) avg);
	dprint(FD_TIME, "min=%llu, max=%llu, mean=%f, S=%f\n",
		(unsigned long long) minc,
		(unsigned long long) maxc, mean, S);

	max_ticks = MAX_CLOCK_SEC * cycles_per_msec * 1000ULL;
	max_mult = ULLONG_MAX / max_ticks;
	dprint(FD_TIME, "\n\nmax_ticks=%llu, __builtin_clzll=%d, "
		"max_mult=%llu\n", max_ticks,
		__builtin_clzll(max_ticks), max_mult);

	/*
	 * Find the largest shift count that will produce
	 * a multiplier that does not exceed max_mult
	 */
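	/*
	 * Continuing the hypothetical 2.5 GHz example: max_ticks =
	 * 3600 * 2,500,000 * 1000 = 9.0e12, max_mult ~= 2,049,638, and the
	 * loop below settles on sft = 22, giving clock_mult =
	 * (1 << 22) * 1000000 / 2500000 = 1,677,721 ~= 0.4 * 2^22.
	 */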
	tmp = max_mult * cycles_per_msec / 1000000;
	while (tmp > 1) {
		tmp >>= 1;
		sft++;
		dprint(FD_TIME, "tmp=%llu, sft=%u\n", tmp, sft);
	}

	clock_shift = sft;
	clock_mult = (1ULL << sft) * 1000000 / cycles_per_msec;
	dprint(FD_TIME, "clock_shift=%u, clock_mult=%llu\n", clock_shift,
		clock_mult);

	/*
	 * Find the greatest power-of-2 number of clock ticks that is less
	 * than MAX_CLOCK_SEC worth of ticks
	 */
	max_cycles_shift = max_cycles_mask = 0;
	tmp = MAX_CLOCK_SEC * 1000ULL * cycles_per_msec;
	dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp,
		max_cycles_shift);
	while (tmp > 1) {
		tmp >>= 1;
		max_cycles_shift++;
		dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp,
			max_cycles_shift);
	}
	/*
	 * If we used (1ULL << max_cycles_shift) * 1000 / cycles_per_msec
	 * here, we would get a discontinuity every
	 * (1ULL << max_cycles_shift) cycles
	 */
	nsecs_for_max_cycles = ((1ULL << max_cycles_shift) * clock_mult)
					>> clock_shift;

	/* Use a bitmask to calculate ticks % (1ULL << max_cycles_shift) */
	for (tmp = 0; tmp < max_cycles_shift; tmp++)
		max_cycles_mask |= 1ULL << tmp;

	dprint(FD_TIME, "max_cycles_shift=%u, 2^max_cycles_shift=%llu, "
		"nsecs_for_max_cycles=%llu, "
		"max_cycles_mask=%016llx\n",
		max_cycles_shift, (1ULL << max_cycles_shift),
		nsecs_for_max_cycles, max_cycles_mask);

	cycles_start = get_cpu_clock();
	dprint(FD_TIME, "cycles_start=%llu\n", cycles_start);
	return 0;
}
#else
static int calibrate_cpu_clock(void)
{
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
	return 0;
#else
	return 1;
#endif
}
#endif // ARCH_HAVE_CPU_CLOCK

#ifndef CONFIG_TLS_THREAD
void fio_local_clock_init(int is_thread)
{
	struct tv_valid *t;

	t = calloc(1, sizeof(*t));
	if (pthread_setspecific(tv_tls_key, t)) {
		log_err("fio: can't set TLS key\n");
		assert(0);
	}
}

static void kill_tv_tls_key(void *data)
{
	free(data);
}
#else
void fio_local_clock_init(int is_thread)
{
}
#endif

void fio_clock_init(void)
{
	if (fio_clock_source == fio_clock_source_inited)
		return;

#ifndef CONFIG_TLS_THREAD
	if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
		log_err("fio: can't create TLS key\n");
#endif

	fio_clock_source_inited = fio_clock_source;

	if (calibrate_cpu_clock())
		tsc_reliable = false;

	/*
	 * If the arch sets tsc_reliable != 0, then it must be good enough
	 * to use as THE clock source. For x86 CPUs, this means the TSC
	 * runs at a constant rate and is synced across CPU cores.
	 */
	if (tsc_reliable) {
		if (!fio_clock_source_set && !fio_monotonic_clocktest(0))
			fio_clock_source = CS_CPUCLOCK;
	} else if (fio_clock_source == CS_CPUCLOCK)
		log_info("fio: clocksource=cpu may not be reliable\n");
}

uint64_t ntime_since(const struct timespec *s, const struct timespec *e)
{
	int64_t sec, nsec;

	sec = e->tv_sec - s->tv_sec;
	nsec = e->tv_nsec - s->tv_nsec;
	if (sec > 0 && nsec < 0) {
		sec--;
		nsec += 1000000000LL;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && nsec < 0))
		return 0;

	return nsec + (sec * 1000000000LL);
}

uint64_t utime_since(const struct timespec *s, const struct timespec *e)
{
	int64_t sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_nsec - s->tv_nsec) / 1000;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	return usec + (sec * 1000000);
}

uint64_t utime_since_now(const struct timespec *s)
{
	struct timespec t;
#ifdef FIO_DEBUG_TIME
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
#else
	fio_gettime(&t, NULL);
#endif

	return utime_since(s, &t);
}

uint64_t mtime_since_tv(const struct timeval *s, const struct timeval *e)
{
	int64_t sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_usec - s->tv_usec);
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	sec *= 1000;
	usec /= 1000;
	return sec + usec;
}

uint64_t mtime_since_now(const struct timespec *s)
{
	struct timespec t;
#ifdef FIO_DEBUG_TIME
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
#else
	fio_gettime(&t, NULL);
#endif

	return mtime_since(s, &t);
}

uint64_t mtime_since(const struct timespec *s, const struct timespec *e)
{
	int64_t sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_nsec - s->tv_nsec) / 1000;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	sec *= 1000;
	usec /= 1000;
	return sec + usec;
}

uint64_t time_since_now(const struct timespec *s)
{
	return mtime_since_now(s) / 1000;
}

#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
    defined(CONFIG_SFAA)

#define CLOCK_ENTRIES_DEBUG	100000
#define CLOCK_ENTRIES_TEST	10000

struct clock_entry {
	uint32_t seq;
	uint32_t cpu;
	uint64_t tsc;
};

struct clock_thread {
	pthread_t thread;
	int cpu;
	int debug;
	pthread_mutex_t lock;
	pthread_mutex_t started;
	unsigned long nr_entries;
	uint32_t *seq;
	struct clock_entry *entries;
};

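/*
 * Atomically bump *seq and return the post-increment value:
 * __sync_fetch_and_add() returns the old value, so add 1 to it.
 */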
static inline uint32_t atomic32_inc_return(uint32_t *seq)
{
	return 1 + __sync_fetch_and_add(seq, 1);
}

static void *clock_thread_fn(void *data)
{
	struct clock_thread *t = data;
	struct clock_entry *c;
	os_cpu_mask_t cpu_mask;
	uint32_t last_seq;
	unsigned long long first;
	int i;

	if (fio_cpuset_init(&cpu_mask)) {
		int __err = errno;

		log_err("clock cpuset init failed: %s\n", strerror(__err));
		goto err_out;
	}

	fio_cpu_set(&cpu_mask, t->cpu);

	if (fio_setaffinity(gettid(), cpu_mask) == -1) {
		int __err = errno;

		log_err("clock setaffinity failed: %s\n", strerror(__err));
		goto err;
	}

	pthread_mutex_lock(&t->lock);
	pthread_mutex_unlock(&t->started);

	first = get_cpu_clock();
	last_seq = 0;
	c = &t->entries[0];
	for (i = 0; i < t->nr_entries; i++, c++) {
		uint32_t seq;
		uint64_t tsc;

		c->cpu = t->cpu;
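		/*
		 * Take a fresh global ticket and sample the TSC; if another
		 * thread grabbed a newer ticket before our read completed,
		 * retry, so each recorded (seq, tsc) pair was sampled while
		 * its ticket was the most recent one.
		 */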
		do {
			seq = atomic32_inc_return(t->seq);
			if (seq < last_seq)
				break;
			tsc = get_cpu_clock();
		} while (seq != *t->seq);

		c->seq = seq;
		c->tsc = tsc;
	}

	if (t->debug) {
		unsigned long long clocks;

		clocks = t->entries[i - 1].tsc - t->entries[0].tsc;
		log_info("cs: cpu%3d: %llu clocks seen, first %llu\n", t->cpu,
							clocks, first);
	}

	/*
	 * The most common platform clock breakage is returning zero
	 * indefinitely. Check for that and return failure.
	 */
	if (!t->entries[i - 1].tsc && !t->entries[0].tsc)
		goto err;

	fio_cpuset_exit(&cpu_mask);
	return NULL;
err:
	fio_cpuset_exit(&cpu_mask);
err_out:
	return (void *) 1;
}

static int clock_cmp(const void *p1, const void *p2)
{
	const struct clock_entry *c1 = p1;
	const struct clock_entry *c2 = p2;

	if (c1->seq == c2->seq)
		log_err("cs: bug in atomic sequence!\n");

	return c1->seq - c2->seq;
}

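/*
 * Once every CPU's samples are sorted by their global sequence numbers,
 * the TSC values must be non-decreasing if the TSCs are synchronized;
 * any backwards step between adjacent entries is flagged as a failure.
 */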
int fio_monotonic_clocktest(int debug)
{
	struct clock_thread *cthreads;
	unsigned int nr_cpus = cpus_online();
	struct clock_entry *entries;
	unsigned long nr_entries, tentries, failed = 0;
	struct clock_entry *prev, *this;
	uint32_t seq = 0;
	unsigned int i;

	if (debug) {
		log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");

#ifdef FIO_INC_DEBUG
		fio_debug |= 1U << FD_TIME;
#endif
		nr_entries = CLOCK_ENTRIES_DEBUG;
	} else
		nr_entries = CLOCK_ENTRIES_TEST;

	calibrate_cpu_clock();

	if (debug) {
#ifdef FIO_INC_DEBUG
		fio_debug &= ~(1U << FD_TIME);
#endif
	}

	cthreads = malloc(nr_cpus * sizeof(struct clock_thread));
	tentries = nr_entries * nr_cpus;
	entries = malloc(tentries * sizeof(struct clock_entry));

	if (debug)
		log_info("cs: Testing %u CPUs\n", nr_cpus);

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		t->cpu = i;
		t->debug = debug;
		t->seq = &seq;
		t->nr_entries = nr_entries;
		t->entries = &entries[i * nr_entries];
		pthread_mutex_init(&t->lock, NULL);
		pthread_mutex_init(&t->started, NULL);
		pthread_mutex_lock(&t->lock);
		if (pthread_create(&t->thread, NULL, clock_thread_fn, t)) {
			failed++;
			nr_cpus = i;
			break;
		}
	}
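
	/*
	 * Start-up handshake: each worker pins itself to its CPU and then
	 * blocks on its pre-locked lock mutex; the loops below wait on the
	 * started mutexes and then unlock all the lock mutexes in quick
	 * succession, so the per-CPU sampling windows overlap.
	 */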
	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		pthread_mutex_lock(&t->started);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		pthread_mutex_unlock(&t->lock);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];
		void *ret;

		pthread_join(t->thread, &ret);
		if (ret)
			failed++;
	}
	free(cthreads);

	if (failed) {
		if (debug)
			log_err("Clocksource test: %lu threads failed\n", failed);
		goto err;
	}

	qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);

	/* silence silly gcc */
	prev = NULL;
	for (failed = i = 0; i < tentries; i++) {
		this = &entries[i];

		if (!i) {
			prev = this;
			continue;
		}

		if (prev->tsc > this->tsc) {
			uint64_t diff = prev->tsc - this->tsc;

			if (!debug) {
				failed++;
				break;
			}

			log_info("cs: CPU clock mismatch (diff=%llu):\n",
						(unsigned long long) diff);
			log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", prev->cpu,
				(unsigned long long) prev->tsc, prev->seq);
			log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", this->cpu,
				(unsigned long long) this->tsc, this->seq);
			failed++;
		}

		prev = this;
	}

	if (debug) {
		if (failed)
			log_info("cs: Failed: %lu\n", failed);
		else
			log_info("cs: Pass!\n");
	}
err:
	free(entries);
	return !!failed;
}

#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && defined(CONFIG_SFAA) */

int fio_monotonic_clocktest(int debug)
{
	if (debug)
		log_info("cs: current platform does not support CPU clocks\n");
	return 1;
}

#endif