time: add ntime_since_now()
[fio.git] / gettime.c
/*
 * Clock functions
 */

#include <unistd.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>

#include "fio.h"
#include "smalloc.h"

#include "hash.h"
#include "os/os.h"

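/*
 * State for converting raw CPU cycle counts (e.g. the x86 TSC) into
 * nanoseconds when the architecture exposes a CPU clock.
 * calibrate_cpu_clock() below fills these in; __fio_gettime() uses them
 * for a two-stage multiply/shift conversion that avoids 64-bit overflow.
 */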
#if defined(ARCH_HAVE_CPU_CLOCK)
#ifndef ARCH_CPU_CLOCK_CYCLES_PER_USEC
static unsigned long cycles_per_msec;
static unsigned long long cycles_start;
static unsigned long long clock_mult;
static unsigned long long max_cycles_mask;
static unsigned long long nsecs_for_max_cycles;
static unsigned int clock_shift;
static unsigned int max_cycles_shift;
#define MAX_CLOCK_SEC 60*60
#endif
#ifdef ARCH_CPU_CLOCK_WRAPS
static unsigned int cycles_wrap;
#endif
#endif
bool tsc_reliable = false;

struct tv_valid {
	int warned;
};
#ifdef ARCH_HAVE_CPU_CLOCK
#ifdef CONFIG_TLS_THREAD
static __thread struct tv_valid static_tv_valid;
#else
static pthread_key_t tv_tls_key;
#endif
#endif

enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
static enum fio_cs fio_clock_source_inited = CS_INVAL;

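/*
 * Optional call-site accounting: when built with FIO_DEBUG_TIME, hash the
 * caller address of every fio_gettime() and count how often each call site
 * samples the clock, dumping the totals at exit.
 */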
#ifdef FIO_DEBUG_TIME

#define HASH_BITS	8
#define HASH_SIZE	(1 << HASH_BITS)

static struct flist_head hash[HASH_SIZE];
static int gtod_inited;

struct gtod_log {
	struct flist_head list;
	void *caller;
	unsigned long calls;
};

static struct gtod_log *find_hash(void *caller)
{
	unsigned long h = hash_ptr(caller, HASH_BITS);
	struct flist_head *entry;

	flist_for_each(entry, &hash[h]) {
		struct gtod_log *log = flist_entry(entry, struct gtod_log,
						   list);

		if (log->caller == caller)
			return log;
	}

	return NULL;
}

static void inc_caller(void *caller)
{
	struct gtod_log *log = find_hash(caller);

	if (!log) {
		unsigned long h;

		log = malloc(sizeof(*log));
		INIT_FLIST_HEAD(&log->list);
		log->caller = caller;
		log->calls = 0;

		h = hash_ptr(caller, HASH_BITS);
		flist_add_tail(&log->list, &hash[h]);
	}

	log->calls++;
}

static void gtod_log_caller(void *caller)
{
	if (gtod_inited)
		inc_caller(caller);
}

static void fio_exit fio_dump_gtod(void)
{
	unsigned long total_calls = 0;
	int i;

	for (i = 0; i < HASH_SIZE; i++) {
		struct flist_head *entry;
		struct gtod_log *log;

		flist_for_each(entry, &hash[i]) {
			log = flist_entry(entry, struct gtod_log, list);

			printf("function %p, calls %lu\n", log->caller,
							log->calls);
			total_calls += log->calls;
		}
	}

	printf("Total %lu gettimeofday\n", total_calls);
}

static void fio_init gtod_init(void)
{
	int i;

	for (i = 0; i < HASH_SIZE; i++)
		INIT_FLIST_HEAD(&hash[i]);

	gtod_inited = 1;
}

#endif /* FIO_DEBUG_TIME */

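/*
 * Prefer CLOCK_MONOTONIC_RAW (not subject to NTP rate adjustment) when the
 * build detected it, then CLOCK_MONOTONIC, with CLOCK_REALTIME as the last
 * resort.
 */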
#ifdef CONFIG_CLOCK_GETTIME
static int fill_clock_gettime(struct timespec *ts)
{
#if defined(CONFIG_CLOCK_MONOTONIC_RAW)
	return clock_gettime(CLOCK_MONOTONIC_RAW, ts);
#elif defined(CONFIG_CLOCK_MONOTONIC)
	return clock_gettime(CLOCK_MONOTONIC, ts);
#else
	return clock_gettime(CLOCK_REALTIME, ts);
#endif
}
#endif

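/*
 * Fill *tp from the currently selected clock source. The CS_CPUCLOCK case
 * converts a raw cycle count into a timespec using the state set up by
 * calibrate_cpu_clock().
 */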
static void __fio_gettime(struct timespec *tp)
{
	switch (fio_clock_source) {
#ifdef CONFIG_GETTIMEOFDAY
	case CS_GTOD: {
		struct timeval tv;
		gettimeofday(&tv, NULL);

		tp->tv_sec = tv.tv_sec;
		tp->tv_nsec = tv.tv_usec * 1000;
		break;
	}
#endif
#ifdef CONFIG_CLOCK_GETTIME
	case CS_CGETTIME: {
		if (fill_clock_gettime(tp) < 0) {
			log_err("fio: clock_gettime fails\n");
			assert(0);
		}
		break;
	}
#endif
#ifdef ARCH_HAVE_CPU_CLOCK
	case CS_CPUCLOCK: {
		uint64_t nsecs, t, multiples;
		struct tv_valid *tv;

#ifdef CONFIG_TLS_THREAD
		tv = &static_tv_valid;
#else
		tv = pthread_getspecific(tv_tls_key);
#endif

		t = get_cpu_clock();
#ifdef ARCH_CPU_CLOCK_WRAPS
		if (t < cycles_start && !cycles_wrap)
			cycles_wrap = 1;
		else if (cycles_wrap && t >= cycles_start && !tv->warned) {
			log_err("fio: double CPU clock wrap\n");
			tv->warned = 1;
		}
#endif
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
		nsecs = t / ARCH_CPU_CLOCK_CYCLES_PER_USEC * 1000;
#else
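		/*
		 * Two-stage conversion to avoid 64-bit overflow: count how
		 * many full 2^max_cycles_shift cycle blocks have elapsed and
		 * charge each one nsecs_for_max_cycles, then convert the
		 * remaining cycles with the clock_mult/clock_shift pair.
		 */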
		t -= cycles_start;
		multiples = t >> max_cycles_shift;
		nsecs = multiples * nsecs_for_max_cycles;
		nsecs += ((t & max_cycles_mask) * clock_mult) >> clock_shift;
#endif
		tp->tv_sec = nsecs / 1000000000ULL;
		tp->tv_nsec = nsecs % 1000000000ULL;
		break;
	}
#endif
	default:
		log_err("fio: invalid clock source %d\n", fio_clock_source);
		break;
	}
}

#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timespec *tp, void *caller)
#else
void fio_gettime(struct timespec *tp, void fio_unused *caller)
#endif
{
#ifdef FIO_DEBUG_TIME
	if (!caller)
		caller = __builtin_return_address(0);

	gtod_log_caller(caller);
#endif
	if (fio_unlikely(fio_gettime_offload(tp)))
		return;

	__fio_gettime(tp);
}

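/*
 * Calibration: measure how many CPU cycles elapse per millisecond of
 * wall-clock time, using clock_gettime()/gettimeofday() as the reference
 * over a window of at least 1280 microseconds.
 */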
#if defined(ARCH_HAVE_CPU_CLOCK) && !defined(ARCH_CPU_CLOCK_CYCLES_PER_USEC)
static unsigned long get_cycles_per_msec(void)
{
	struct timespec s, e;
	uint64_t c_s, c_e;
	enum fio_cs old_cs = fio_clock_source;
	uint64_t elapsed;

#ifdef CONFIG_CLOCK_GETTIME
	fio_clock_source = CS_CGETTIME;
#else
	fio_clock_source = CS_GTOD;
#endif
	__fio_gettime(&s);

	c_s = get_cpu_clock();
	do {
		__fio_gettime(&e);

		elapsed = utime_since(&s, &e);
		if (elapsed >= 1280) {
			c_e = get_cpu_clock();
			break;
		}
	} while (1);

	fio_clock_source = old_cs;
	return (c_e - c_s) * 1000 / elapsed;
}

#define NR_TIME_ITERS	50

static int calibrate_cpu_clock(void)
{
	double delta, mean, S;
	uint64_t minc, maxc, avg, cycles[NR_TIME_ITERS];
	int i, samples, sft = 0;
	unsigned long long tmp, max_ticks, max_mult;

	cycles[0] = get_cycles_per_msec();
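	/*
	 * Take NR_TIME_ITERS samples, tracking a running mean and sum of
	 * squared deviations (Welford-style); samples further than one
	 * standard deviation from the mean are dropped from the average.
	 */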
	S = delta = mean = 0.0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		cycles[i] = get_cycles_per_msec();
		delta = cycles[i] - mean;
		if (delta) {
			mean += delta / (i + 1.0);
			S += delta * (cycles[i] - mean);
		}
	}

	/*
	 * The most common platform clock breakage is returning zero
	 * indefinitely. Check for that and return failure.
	 */
	if (!cycles[0] && !cycles[NR_TIME_ITERS - 1])
		return 1;

	S = sqrt(S / (NR_TIME_ITERS - 1.0));

	minc = -1ULL;
	maxc = samples = avg = 0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		double this = cycles[i];

		minc = min(cycles[i], minc);
		maxc = max(cycles[i], maxc);

		if ((fmax(this, mean) - fmin(this, mean)) > S)
			continue;
		samples++;
		avg += this;
	}

	S /= (double) NR_TIME_ITERS;

	for (i = 0; i < NR_TIME_ITERS; i++)
		dprint(FD_TIME, "cycles[%d]=%llu\n", i, (unsigned long long) cycles[i]);

	avg /= samples;
	cycles_per_msec = avg;
	dprint(FD_TIME, "avg: %llu\n", (unsigned long long) avg);
	dprint(FD_TIME, "min=%llu, max=%llu, mean=%f, S=%f\n",
			(unsigned long long) minc,
			(unsigned long long) maxc, mean, S);

	max_ticks = MAX_CLOCK_SEC * cycles_per_msec * 1000ULL;
	max_mult = ULLONG_MAX / max_ticks;
	dprint(FD_TIME, "\n\nmax_ticks=%llu, __builtin_clzll=%d, "
			"max_mult=%llu\n", max_ticks,
			__builtin_clzll(max_ticks), max_mult);

	/*
	 * Find the largest shift count that will produce
	 * a multiplier that does not exceed max_mult
	 */
	tmp = max_mult * cycles_per_msec / 1000000;
	while (tmp > 1) {
		tmp >>= 1;
		sft++;
		dprint(FD_TIME, "tmp=%llu, sft=%u\n", tmp, sft);
	}

	clock_shift = sft;
	clock_mult = (1ULL << sft) * 1000000 / cycles_per_msec;
	dprint(FD_TIME, "clock_shift=%u, clock_mult=%llu\n", clock_shift,
							clock_mult);

	/*
	 * Find the greatest power of 2 clock ticks that is less than the
	 * ticks in MAX_CLOCK_SEC
	 */
	max_cycles_shift = max_cycles_mask = 0;
	tmp = MAX_CLOCK_SEC * 1000ULL * cycles_per_msec;
	dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp,
							max_cycles_shift);
	while (tmp > 1) {
		tmp >>= 1;
		max_cycles_shift++;
		dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp, max_cycles_shift);
	}
	/*
	 * If we used (1ULL << max_cycles_shift) * 1000 / cycles_per_msec
	 * here we would have a discontinuity every
	 * (1ULL << max_cycles_shift) cycles
	 */
	nsecs_for_max_cycles = ((1ULL << max_cycles_shift) * clock_mult)
					>> clock_shift;

	/* Use a bitmask to calculate ticks % (1ULL << max_cycles_shift) */
	for (tmp = 0; tmp < max_cycles_shift; tmp++)
		max_cycles_mask |= 1ULL << tmp;

	dprint(FD_TIME, "max_cycles_shift=%u, 2^max_cycles_shift=%llu, "
			"nsecs_for_max_cycles=%llu, "
			"max_cycles_mask=%016llx\n",
			max_cycles_shift, (1ULL << max_cycles_shift),
			nsecs_for_max_cycles, max_cycles_mask);

	cycles_start = get_cpu_clock();
	dprint(FD_TIME, "cycles_start=%llu\n", cycles_start);
	return 0;
}
#else
static int calibrate_cpu_clock(void)
{
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
	return 0;
#else
	return 1;
#endif
}
#endif // ARCH_HAVE_CPU_CLOCK

#ifndef CONFIG_TLS_THREAD
void fio_local_clock_init(int is_thread)
{
	struct tv_valid *t;

	t = calloc(1, sizeof(*t));
	if (pthread_setspecific(tv_tls_key, t)) {
		log_err("fio: can't set TLS key\n");
		assert(0);
	}
}

static void kill_tv_tls_key(void *data)
{
	free(data);
}
#else
void fio_local_clock_init(int is_thread)
{
}
#endif

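/*
 * Select and initialize the clock source. If the TSC is reliable and passes
 * the cross-CPU monotonic test, prefer CS_CPUCLOCK unless the user explicitly
 * set a clock source.
 */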
void fio_clock_init(void)
{
	if (fio_clock_source == fio_clock_source_inited)
		return;

#ifndef CONFIG_TLS_THREAD
	if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
		log_err("fio: can't create TLS key\n");
#endif

	fio_clock_source_inited = fio_clock_source;

	if (calibrate_cpu_clock())
		tsc_reliable = false;

	/*
	 * If the arch sets tsc_reliable != 0, then it must be good enough
	 * to use as THE clock source. For x86 CPUs, this means the TSC
	 * runs at a constant rate and is synced across CPU cores.
	 */
	if (tsc_reliable) {
		if (!fio_clock_source_set && !fio_monotonic_clocktest(0))
			fio_clock_source = CS_CPUCLOCK;
	} else if (fio_clock_source == CS_CPUCLOCK)
		log_info("fio: clocksource=cpu may not be reliable\n");
	dprint(FD_TIME, "gettime: clocksource=%d\n", (int) fio_clock_source);
}

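/*
 * ntime_since() returns e - s in nanoseconds; ntime_since_now() measures from
 * s to the current time. Negative differences (clock warps) clamp to zero,
 * matching the usec/msec helpers below.
 */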
uint64_t ntime_since(const struct timespec *s, const struct timespec *e)
{
	int64_t sec, nsec;

	sec = e->tv_sec - s->tv_sec;
	nsec = e->tv_nsec - s->tv_nsec;
	if (sec > 0 && nsec < 0) {
		sec--;
		nsec += 1000000000LL;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && nsec < 0))
		return 0;

	return nsec + (sec * 1000000000LL);
}

uint64_t ntime_since_now(const struct timespec *s)
{
	struct timespec now;

	fio_gettime(&now, NULL);
	return ntime_since(s, &now);
}

uint64_t utime_since(const struct timespec *s, const struct timespec *e)
{
	int64_t sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_nsec - s->tv_nsec) / 1000;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	return usec + (sec * 1000000);
}

uint64_t utime_since_now(const struct timespec *s)
{
	struct timespec t;
#ifdef FIO_DEBUG_TIME
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
#else
	fio_gettime(&t, NULL);
#endif

	return utime_since(s, &t);
}

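/*
 * mtime_since_tv() operates on struct timeval (microsecond resolution);
 * the *_since()/*_since_now() variants below take struct timespec.
 */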
uint64_t mtime_since_tv(const struct timeval *s, const struct timeval *e)
{
	int64_t sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_usec - s->tv_usec);
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	sec *= 1000;
	usec /= 1000;
	return sec + usec;
}

uint64_t mtime_since_now(const struct timespec *s)
{
	struct timespec t;
#ifdef FIO_DEBUG_TIME
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
#else
	fio_gettime(&t, NULL);
#endif

	return mtime_since(s, &t);
}

uint64_t mtime_since(const struct timespec *s, const struct timespec *e)
{
	int64_t sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_nsec - s->tv_nsec) / 1000;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	sec *= 1000;
	usec /= 1000;
	return sec + usec;
}

uint64_t time_since_now(const struct timespec *s)
{
	return mtime_since_now(s) / 1000;
}

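/*
 * Cross-CPU clock sanity test: one thread per online CPU, each pinned to its
 * CPU, records (sequence, TSC) pairs driven by a shared atomic counter. When
 * the merged samples are sorted by sequence, the TSC values must never go
 * backwards if the CPU clock is usable as a global clock source.
 */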
#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
    defined(CONFIG_SFAA)

#define CLOCK_ENTRIES_DEBUG	100000
#define CLOCK_ENTRIES_TEST	1000

struct clock_entry {
	uint32_t seq;
	uint32_t cpu;
	uint64_t tsc;
};

struct clock_thread {
	pthread_t thread;
	int cpu;
	int debug;
	pthread_mutex_t lock;
	pthread_mutex_t started;
	unsigned long nr_entries;
	uint32_t *seq;
	struct clock_entry *entries;
};

static inline uint32_t atomic32_inc_return(uint32_t *seq)
{
	return 1 + __sync_fetch_and_add(seq, 1);
}

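/*
 * Per-CPU sampler: pin to the assigned CPU, wait for the start signal, then
 * repeatedly take a sequence number and read the CPU clock, retrying whenever
 * another thread grabbed a newer sequence in between so that each recorded
 * (seq, tsc) pair is consistent.
 */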
static void *clock_thread_fn(void *data)
{
	struct clock_thread *t = data;
	struct clock_entry *c;
	os_cpu_mask_t cpu_mask;
	uint32_t last_seq;
	unsigned long long first;
	int i;

	if (fio_cpuset_init(&cpu_mask)) {
		int __err = errno;

		log_err("clock cpuset init failed: %s\n", strerror(__err));
		goto err_out;
	}

	fio_cpu_set(&cpu_mask, t->cpu);

	if (fio_setaffinity(gettid(), cpu_mask) == -1) {
		int __err = errno;

		log_err("clock setaffinity failed: %s\n", strerror(__err));
		goto err;
	}

	pthread_mutex_lock(&t->lock);
	pthread_mutex_unlock(&t->started);

	first = get_cpu_clock();
	last_seq = 0;
	c = &t->entries[0];
	for (i = 0; i < t->nr_entries; i++, c++) {
		uint32_t seq;
		uint64_t tsc;

		c->cpu = t->cpu;
		do {
			seq = atomic32_inc_return(t->seq);
			if (seq < last_seq)
				break;
			tsc = get_cpu_clock();
		} while (seq != *t->seq);

		c->seq = seq;
		c->tsc = tsc;
	}

	if (t->debug) {
		unsigned long long clocks;

		clocks = t->entries[i - 1].tsc - t->entries[0].tsc;
		log_info("cs: cpu%3d: %llu clocks seen, first %llu\n", t->cpu,
							clocks, first);
	}

	/*
	 * The most common platform clock breakage is returning zero
	 * indefinitely. Check for that and return failure.
	 */
	if (!t->entries[i - 1].tsc && !t->entries[0].tsc)
		goto err;

	fio_cpuset_exit(&cpu_mask);
	return NULL;
err:
	fio_cpuset_exit(&cpu_mask);
err_out:
	return (void *) 1;
}

static int clock_cmp(const void *p1, const void *p2)
{
	const struct clock_entry *c1 = p1;
	const struct clock_entry *c2 = p2;

	if (c1->seq == c2->seq)
		log_err("cs: bug in atomic sequence!\n");

	return c1->seq - c2->seq;
}

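/*
 * Run the clock test across all online CPUs. Returns 0 if every TSC sample
 * was monotonic in sequence order, non-zero otherwise; with debug set it also
 * logs per-CPU statistics and any mismatches.
 */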
int fio_monotonic_clocktest(int debug)
{
	struct clock_thread *cthreads;
	unsigned int nr_cpus = cpus_online();
	struct clock_entry *entries;
	unsigned long nr_entries, tentries, failed = 0;
	struct clock_entry *prev, *this;
	uint32_t seq = 0;
	unsigned int i;

	if (debug) {
		log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");

#ifdef FIO_INC_DEBUG
		fio_debug |= 1U << FD_TIME;
#endif
		nr_entries = CLOCK_ENTRIES_DEBUG;
	} else
		nr_entries = CLOCK_ENTRIES_TEST;

	calibrate_cpu_clock();

	if (debug) {
#ifdef FIO_INC_DEBUG
		fio_debug &= ~(1U << FD_TIME);
#endif
	}

	cthreads = malloc(nr_cpus * sizeof(struct clock_thread));
	tentries = nr_entries * nr_cpus;
	entries = malloc(tentries * sizeof(struct clock_entry));

	if (debug)
		log_info("cs: Testing %u CPUs\n", nr_cpus);

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		t->cpu = i;
		t->debug = debug;
		t->seq = &seq;
		t->nr_entries = nr_entries;
		t->entries = &entries[i * nr_entries];
		pthread_mutex_init(&t->lock, NULL);
		pthread_mutex_init(&t->started, NULL);
		pthread_mutex_lock(&t->lock);
		if (pthread_create(&t->thread, NULL, clock_thread_fn, t)) {
			failed++;
			nr_cpus = i;
			break;
		}
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		pthread_mutex_lock(&t->started);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		pthread_mutex_unlock(&t->lock);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];
		void *ret;

		pthread_join(t->thread, &ret);
		if (ret)
			failed++;
	}
	free(cthreads);

	if (failed) {
		if (debug)
			log_err("Clocksource test: %lu threads failed\n", failed);
		goto err;
	}

	qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);

	/* silence silly gcc */
	prev = NULL;
	for (failed = i = 0; i < tentries; i++) {
		this = &entries[i];

		if (!i) {
			prev = this;
			continue;
		}

		if (prev->tsc > this->tsc) {
			uint64_t diff = prev->tsc - this->tsc;

			if (!debug) {
				failed++;
				break;
			}

			log_info("cs: CPU clock mismatch (diff=%llu):\n",
						(unsigned long long) diff);
			log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", prev->cpu, (unsigned long long) prev->tsc, prev->seq);
			log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", this->cpu, (unsigned long long) this->tsc, this->seq);
			failed++;
		}

		prev = this;
	}

	if (debug) {
		if (failed)
			log_info("cs: Failed: %lu\n", failed);
		else
			log_info("cs: Pass!\n");
	}
err:
	free(entries);
	return !!failed;
}

#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) */

int fio_monotonic_clocktest(int debug)
{
	if (debug)
		log_info("cs: current platform does not support CPU clocks\n");
	return 1;
}

#endif