nanosecond: fix up conversion of ticks to nsec by doing the conversion in 2 stages
gettime.c
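Converting a large tick count to nanoseconds with a single fixed-point multiply can overflow 64 bits; the conversion now handles whole power-of-2 chunks of ticks with a precomputed nanosecond value and runs only the remainder through the multiply.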
/*
 * Clock functions
 */

#include <unistd.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>

#include "fio.h"
#include "smalloc.h"

#include "hash.h"
#include "os/os.h"

#if defined(ARCH_HAVE_CPU_CLOCK)
#ifndef ARCH_CPU_CLOCK_CYCLES_PER_USEC
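/*
 * Calibration state for converting CPU clock ticks to nsecs:
 * clock_mult/clock_shift form a fixed-point multiplier for the
 * tick -> nsec conversion, cycles_start anchors tick counts at the
 * calibration point, and max_cycles_shift/max_cycles_mask/
 * nsecs_for_max_cycles implement the two-stage conversion this
 * change introduces to avoid 64-bit overflow.
 */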
static unsigned long cycles_per_usec;
static unsigned long long cycles_start;
static unsigned long long clock_mult;
static unsigned long long max_cycles_mask;
static unsigned long long nsecs_for_max_cycles;
static unsigned int clock_shift;
static unsigned int max_cycles_shift;
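/* longest interval, in seconds, the conversion must cover without overflow */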
#define MAX_CLOCK_SEC (60*60)
#endif
#ifdef ARCH_CPU_CLOCK_WRAPS
static unsigned int cycles_wrap;
#endif
#endif
int tsc_reliable = 0;

struct tv_valid {
	uint64_t last_cycles;
	int last_tv_valid;
	int warned;
};
#ifdef ARCH_HAVE_CPU_CLOCK
#ifdef CONFIG_TLS_THREAD
static __thread struct tv_valid static_tv_valid;
#else
static pthread_key_t tv_tls_key;
#endif
#endif

enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
static enum fio_cs fio_clock_source_inited = CS_INVAL;

#ifdef FIO_DEBUG_TIME

#define HASH_BITS	8
#define HASH_SIZE	(1 << HASH_BITS)

static struct flist_head hash[HASH_SIZE];
static int gtod_inited;

struct gtod_log {
	struct flist_head list;
	void *caller;
	unsigned long calls;
};

static struct gtod_log *find_hash(void *caller)
{
	unsigned long h = hash_ptr(caller, HASH_BITS);
	struct flist_head *entry;

	flist_for_each(entry, &hash[h]) {
		struct gtod_log *log = flist_entry(entry, struct gtod_log,
									list);

		if (log->caller == caller)
			return log;
	}

	return NULL;
}

static void inc_caller(void *caller)
{
	struct gtod_log *log = find_hash(caller);

	if (!log) {
		unsigned long h;

		log = malloc(sizeof(*log));
		INIT_FLIST_HEAD(&log->list);
		log->caller = caller;
		log->calls = 0;

		h = hash_ptr(caller, HASH_BITS);
		flist_add_tail(&log->list, &hash[h]);
	}

	log->calls++;
}

static void gtod_log_caller(void *caller)
{
	if (gtod_inited)
		inc_caller(caller);
}

static void fio_exit fio_dump_gtod(void)
{
	unsigned long total_calls = 0;
	int i;

	for (i = 0; i < HASH_SIZE; i++) {
		struct flist_head *entry;
		struct gtod_log *log;

		flist_for_each(entry, &hash[i]) {
			log = flist_entry(entry, struct gtod_log, list);

			printf("function %p, calls %lu\n", log->caller,
								log->calls);
			total_calls += log->calls;
		}
	}

	printf("Total %lu gettimeofday\n", total_calls);
}

static void fio_init gtod_init(void)
{
	int i;

	for (i = 0; i < HASH_SIZE; i++)
		INIT_FLIST_HEAD(&hash[i]);

	gtod_inited = 1;
}

#endif /* FIO_DEBUG_TIME */

#ifdef CONFIG_CLOCK_GETTIME
static int fill_clock_gettime(struct timespec *ts)
{
#if defined(CONFIG_CLOCK_MONOTONIC_RAW)
	return clock_gettime(CLOCK_MONOTONIC_RAW, ts);
#elif defined(CONFIG_CLOCK_MONOTONIC)
	return clock_gettime(CLOCK_MONOTONIC, ts);
#else
	return clock_gettime(CLOCK_REALTIME, ts);
#endif
}
#endif

static void __fio_gettime(struct timespec *tp)
{
	switch (fio_clock_source) {
#ifdef CONFIG_GETTIMEOFDAY
	case CS_GTOD: {
		struct timeval tv;

		gettimeofday(&tv, NULL);
		tp->tv_sec = tv.tv_sec;
		tp->tv_nsec = tv.tv_usec * 1000;
		break;
		}
#endif
#ifdef CONFIG_CLOCK_GETTIME
	case CS_CGETTIME: {
		if (fill_clock_gettime(tp) < 0) {
			log_err("fio: clock_gettime fails\n");
			assert(0);
		}
		break;
		}
#endif
#ifdef ARCH_HAVE_CPU_CLOCK
	case CS_CPUCLOCK: {
		uint64_t nsecs, t, multiples;
		struct tv_valid *tv;

#ifdef CONFIG_TLS_THREAD
		tv = &static_tv_valid;
#else
		tv = pthread_getspecific(tv_tls_key);
#endif

		t = get_cpu_clock();
#ifdef ARCH_CPU_CLOCK_WRAPS
		if (t < cycles_start && !cycles_wrap)
			cycles_wrap = 1;
		else if (cycles_wrap && t >= cycles_start && !tv->warned) {
			log_err("fio: double CPU clock wrap\n");
			tv->warned = 1;
		}
#endif
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
		nsecs = t / ARCH_CPU_CLOCK_CYCLES_PER_USEC * 1000;
#else
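		/*
		 * The two-stage conversion named in the commit title:
		 * multiplying a large tick count by clock_mult in one go
		 * could overflow 64 bits, so whole 2^max_cycles_shift-tick
		 * chunks are converted with the precomputed
		 * nsecs_for_max_cycles and only the remainder goes through
		 * the fixed-point multiply.
		 */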
		t -= cycles_start;
		multiples = t >> max_cycles_shift;
		nsecs = multiples * nsecs_for_max_cycles;
		nsecs += ((t & max_cycles_mask) * clock_mult) >> clock_shift;
#endif
		tv->last_cycles = t;
		tv->last_tv_valid = 1;

		tp->tv_sec = nsecs / 1000000000ULL;
		tp->tv_nsec = nsecs % 1000000000ULL;
		break;
		}
#endif
	default:
		log_err("fio: invalid clock source %d\n", fio_clock_source);
		break;
	}
}

#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timespec *tp, void *caller)
#else
void fio_gettime(struct timespec *tp, void fio_unused *caller)
#endif
{
#ifdef FIO_DEBUG_TIME
	if (!caller)
		caller = __builtin_return_address(0);

	gtod_log_caller(caller);
#endif
	if (fio_unlikely(fio_gettime_offload(tp)))
		return;

	__fio_gettime(tp);
}

#if defined(ARCH_HAVE_CPU_CLOCK) && !defined(ARCH_CPU_CLOCK_CYCLES_PER_USEC)
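/*
 * Measure how many CPU clock ticks elapse per usec of wall-clock time:
 * read the CPU clock, spin until at least 1280 usecs have passed on the
 * clock_gettime/gettimeofday clock, then divide the tick delta by the
 * elapsed usecs.
 */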
static unsigned long get_cycles_per_usec(void)
{
	struct timespec s, e;
	uint64_t c_s, c_e;
	enum fio_cs old_cs = fio_clock_source;
	uint64_t elapsed;

#ifdef CONFIG_CLOCK_GETTIME
	fio_clock_source = CS_CGETTIME;
#else
	fio_clock_source = CS_GTOD;
#endif
	__fio_gettime(&s);

	c_s = get_cpu_clock();
	do {
		__fio_gettime(&e);

		elapsed = utime_since(&s, &e);
		if (elapsed >= 1280) {
			c_e = get_cpu_clock();
			break;
		}
	} while (1);

	fio_clock_source = old_cs;
	return (c_e - c_s) / elapsed;
}

#define NR_TIME_ITERS	50

static int calibrate_cpu_clock(void)
{
	double delta, mean, S;
	uint64_t minc, maxc, avg, cycles[NR_TIME_ITERS];
	int i, samples, sft = 0;
	unsigned long long tmp, max_ticks, max_mult;

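	/*
	 * Sample the tick rate NR_TIME_ITERS times, keeping a running mean
	 * and sum of squared deviations (Welford's method) so outliers can
	 * be filtered below. The initial call primes the path; cycles[0]
	 * is overwritten on the first loop iteration.
	 */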
	cycles[0] = get_cycles_per_usec();
	S = delta = mean = 0.0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		cycles[i] = get_cycles_per_usec();
		delta = cycles[i] - mean;
		if (delta) {
			mean += delta / (i + 1.0);
			S += delta * (cycles[i] - mean);
		}
	}

	/*
	 * The most common platform clock breakage is returning zero
	 * indefinitely. Check for that and return failure.
	 */
	if (!cycles[0] && !cycles[NR_TIME_ITERS - 1])
		return 1;

	S = sqrt(S / (NR_TIME_ITERS - 1.0));

	minc = -1ULL;
	maxc = samples = avg = 0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		double this = cycles[i];

		minc = min(cycles[i], minc);
		maxc = max(cycles[i], maxc);

		if ((fmax(this, mean) - fmin(this, mean)) > S)
			continue;
		samples++;
		avg += this;
	}

	S /= (double) NR_TIME_ITERS;

	for (i = 0; i < NR_TIME_ITERS; i++)
		dprint(FD_TIME, "cycles[%d]=%llu\n", i, (unsigned long long) cycles[i]);

	avg /= samples;
	cycles_per_usec = avg;
	dprint(FD_TIME, "avg: %llu\n", (unsigned long long) avg);
	dprint(FD_TIME, "min=%llu, max=%llu, mean=%f, S=%f\n",
			(unsigned long long) minc,
			(unsigned long long) maxc, mean, S);

	max_ticks = MAX_CLOCK_SEC * cycles_per_usec * 1000000ULL;
	max_mult = ULLONG_MAX / max_ticks;
	dprint(FD_TIME, "\n\nmax_ticks=%llu, __builtin_clzll=%d, max_mult=%llu\n",
		max_ticks, __builtin_clzll(max_ticks), max_mult);

	/*
	 * Find the largest shift count that will produce
	 * a multiplier that does not exceed max_mult
	 */
	tmp = max_mult * cycles_per_usec / 1000;
	while (tmp > 1) {
		tmp >>= 1;
		sft++;
		dprint(FD_TIME, "tmp=%llu, sft=%u\n", tmp, sft);
	}

	clock_shift = sft;
	clock_mult = (1ULL << sft) * 1000 / cycles_per_usec;
	dprint(FD_TIME, "clock_shift=%u, clock_mult=%llu\n", clock_shift, clock_mult);
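	/*
	 * With this pair, ticks convert to nsecs as
	 * (ticks * clock_mult) >> clock_shift, i.e. roughly
	 * ticks * 1000 / cycles_per_usec, without overflowing 64 bits
	 * for up to max_ticks ticks.
	 */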

	/*
	 * Find the greatest power of 2 clock ticks that is less than the
	 * ticks in MAX_CLOCK_SEC.
	 */
	max_cycles_shift = max_cycles_mask = 0;
	tmp = MAX_CLOCK_SEC * 1000000ULL * cycles_per_usec;
	dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp, max_cycles_shift);
	while (tmp > 1) {
		tmp >>= 1;
		max_cycles_shift++;
		dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp, max_cycles_shift);
	}
	/*
	 * If we used (1ULL << max_cycles_shift) * 1000 / cycles_per_usec
	 * here, there would be a discontinuity every
	 * (1ULL << max_cycles_shift) cycles.
	 */
	nsecs_for_max_cycles = ((1ULL << max_cycles_shift) * clock_mult) >> clock_shift;

	/* Use a bitmask to calculate ticks % (1ULL << max_cycles_shift) */
	for (tmp = 0; tmp < max_cycles_shift; tmp++)
		max_cycles_mask |= 1ULL << tmp;

	dprint(FD_TIME, "max_cycles_shift=%u, 2^max_cycles_shift=%llu, nsecs_for_max_cycles=%llu, max_cycles_mask=%016llx\n",
		max_cycles_shift, (1ULL << max_cycles_shift),
		nsecs_for_max_cycles, max_cycles_mask);

	cycles_start = get_cpu_clock();
	dprint(FD_TIME, "cycles_start=%llu\n", cycles_start);
	return 0;
}
#else
static int calibrate_cpu_clock(void)
{
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
	return 0;
#else
	return 1;
#endif
}
#endif /* ARCH_HAVE_CPU_CLOCK */

#ifndef CONFIG_TLS_THREAD
void fio_local_clock_init(int is_thread)
{
	struct tv_valid *t;

	t = calloc(1, sizeof(*t));
	if (pthread_setspecific(tv_tls_key, t)) {
		log_err("fio: can't set TLS key\n");
		assert(0);
	}
}

static void kill_tv_tls_key(void *data)
{
	free(data);
}
#else
void fio_local_clock_init(int is_thread)
{
}
#endif

void fio_clock_init(void)
{
	if (fio_clock_source == fio_clock_source_inited)
		return;

#ifndef CONFIG_TLS_THREAD
	if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
		log_err("fio: can't create TLS key\n");
#endif

	fio_clock_source_inited = fio_clock_source;

	if (calibrate_cpu_clock())
		tsc_reliable = 0;

	/*
	 * If the arch sets tsc_reliable != 0, then it must be good enough
	 * to use as THE clock source. For x86 CPUs, this means the TSC
	 * runs at a constant rate and is synced across CPU cores.
	 */
	if (tsc_reliable) {
		if (!fio_clock_source_set && !fio_monotonic_clocktest(0))
			fio_clock_source = CS_CPUCLOCK;
	} else if (fio_clock_source == CS_CPUCLOCK)
		log_info("fio: clocksource=cpu may not be reliable\n");
}

uint64_t ntime_since(const struct timespec *s, const struct timespec *e)
{
	int64_t sec, nsec;

	sec = e->tv_sec - s->tv_sec;
	nsec = e->tv_nsec - s->tv_nsec;
	if (sec > 0 && nsec < 0) {
		sec--;
		nsec += 1000000000LL;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && nsec < 0))
		return 0;

	return nsec + (sec * 1000000000LL);
}

uint64_t utime_since(const struct timespec *s, const struct timespec *e)
{
	int64_t sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_nsec - s->tv_nsec) / 1000;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	return usec + (sec * 1000000);
}

uint64_t utime_since_now(const struct timespec *s)
{
	struct timespec t;
#ifdef FIO_DEBUG_TIME
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
#else
	fio_gettime(&t, NULL);
#endif

	return utime_since(s, &t);
}

uint64_t mtime_since_tv(const struct timeval *s, const struct timeval *e)
{
	int64_t sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_usec - s->tv_usec);
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	sec *= 1000;
	usec /= 1000;
	return sec + usec;
}

uint64_t mtime_since_now(const struct timespec *s)
{
	struct timespec t;
#ifdef FIO_DEBUG_TIME
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
#else
	fio_gettime(&t, NULL);
#endif

	return mtime_since(s, &t);
}

uint64_t mtime_since(const struct timespec *s, const struct timespec *e)
{
	int64_t sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_nsec - s->tv_nsec) / 1000;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	sec *= 1000;
	usec /= 1000;
	return sec + usec;
}

uint64_t time_since_now(const struct timespec *s)
{
	return mtime_since_now(s) / 1000;
}

#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
    defined(CONFIG_SFAA)

#define CLOCK_ENTRIES_DEBUG	100000
#define CLOCK_ENTRIES_TEST	10000

struct clock_entry {
	uint32_t seq;
	uint32_t cpu;
	uint64_t tsc;
};

struct clock_thread {
	pthread_t thread;
	int cpu;
	int debug;
	pthread_mutex_t lock;
	pthread_mutex_t started;
	unsigned long nr_entries;
	uint32_t *seq;
	struct clock_entry *entries;
};

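/* atomically bump *seq and return the post-increment value */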
static inline uint32_t atomic32_inc_return(uint32_t *seq)
{
	return 1 + __sync_fetch_and_add(seq, 1);
}

static void *clock_thread_fn(void *data)
{
	struct clock_thread *t = data;
	struct clock_entry *c;
	os_cpu_mask_t cpu_mask;
	uint32_t last_seq;
	unsigned long long first;
	int i;

	if (fio_cpuset_init(&cpu_mask)) {
		int __err = errno;

		log_err("clock cpuset init failed: %s\n", strerror(__err));
		goto err_out;
	}

	fio_cpu_set(&cpu_mask, t->cpu);

	if (fio_setaffinity(gettid(), cpu_mask) == -1) {
		int __err = errno;

		log_err("clock setaffinity failed: %s\n", strerror(__err));
		goto err;
	}

	pthread_mutex_lock(&t->lock);
	pthread_mutex_unlock(&t->started);

	first = get_cpu_clock();
	last_seq = 0;
	c = &t->entries[0];
	for (i = 0; i < t->nr_entries; i++, c++) {
		uint32_t seq;
		uint64_t tsc;

		c->cpu = t->cpu;
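		/*
		 * Pair a globally unique sequence number with a TSC read;
		 * retry if another CPU bumped the sequence while we were
		 * reading the TSC, so that sorting the entries by seq
		 * reproduces the global order of the samples.
		 */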
		do {
			seq = atomic32_inc_return(t->seq);
			if (seq < last_seq)
				break;
			tsc = get_cpu_clock();
		} while (seq != *t->seq);

		c->seq = seq;
		c->tsc = tsc;
	}

	if (t->debug) {
		unsigned long long clocks;

		clocks = t->entries[i - 1].tsc - t->entries[0].tsc;
		log_info("cs: cpu%3d: %llu clocks seen, first %llu\n", t->cpu,
							clocks, first);
	}

	/*
	 * The most common platform clock breakage is returning zero
	 * indefinitely. Check for that and return failure.
	 */
	if (!t->entries[i - 1].tsc && !t->entries[0].tsc)
		goto err;

	fio_cpuset_exit(&cpu_mask);
	return NULL;
err:
	fio_cpuset_exit(&cpu_mask);
err_out:
	return (void *) 1;
}

static int clock_cmp(const void *p1, const void *p2)
{
	const struct clock_entry *c1 = p1;
	const struct clock_entry *c2 = p2;

	if (c1->seq == c2->seq)
		log_err("cs: bug in atomic sequence!\n");

	return c1->seq - c2->seq;
}

int fio_monotonic_clocktest(int debug)
{
	struct clock_thread *cthreads;
	unsigned int nr_cpus = cpus_online();
	struct clock_entry *entries;
	unsigned long nr_entries, tentries, failed = 0;
	struct clock_entry *prev, *this;
	uint32_t seq = 0;
	unsigned int i;

	if (debug) {
		log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");

#ifdef FIO_INC_DEBUG
		fio_debug |= 1U << FD_TIME;
#endif
		nr_entries = CLOCK_ENTRIES_DEBUG;
	} else
		nr_entries = CLOCK_ENTRIES_TEST;

	calibrate_cpu_clock();

	if (debug) {
#ifdef FIO_INC_DEBUG
		fio_debug &= ~(1U << FD_TIME);
#endif
	}

	cthreads = malloc(nr_cpus * sizeof(struct clock_thread));
	tentries = nr_entries * nr_cpus;
	entries = malloc(tentries * sizeof(struct clock_entry));

	if (debug)
		log_info("cs: Testing %u CPUs\n", nr_cpus);

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		t->cpu = i;
		t->debug = debug;
		t->seq = &seq;
		t->nr_entries = nr_entries;
		t->entries = &entries[i * nr_entries];
		pthread_mutex_init(&t->lock, NULL);
		pthread_mutex_init(&t->started, NULL);
		pthread_mutex_lock(&t->lock);
		if (pthread_create(&t->thread, NULL, clock_thread_fn, t)) {
			failed++;
			nr_cpus = i;
			break;
		}
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		pthread_mutex_lock(&t->started);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		pthread_mutex_unlock(&t->lock);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];
		void *ret;

		pthread_join(t->thread, &ret);
		if (ret)
			failed++;
	}
	free(cthreads);

	if (failed) {
		if (debug)
			log_err("Clocksource test: %lu threads failed\n", failed);
		goto err;
	}

	qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);

	/* silence silly gcc */
	prev = NULL;
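	/*
	 * Walk the entries in global (seq) order: if an earlier sample
	 * carries a larger TSC than a later one, the CPU clocks are not
	 * synced across cores and the test fails.
	 */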
	for (failed = i = 0; i < tentries; i++) {
		this = &entries[i];

		if (!i) {
			prev = this;
			continue;
		}

		if (prev->tsc > this->tsc) {
			uint64_t diff = prev->tsc - this->tsc;

			if (!debug) {
				failed++;
				break;
			}

			log_info("cs: CPU clock mismatch (diff=%llu):\n",
						(unsigned long long) diff);
			log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", prev->cpu, (unsigned long long) prev->tsc, prev->seq);
			log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", this->cpu, (unsigned long long) this->tsc, this->seq);
			failed++;
		}

		prev = this;
	}

	if (debug) {
		if (failed)
			log_info("cs: Failed: %lu\n", failed);
		else
			log_info("cs: Pass!\n");
	}
err:
	free(entries);
	return !!failed;
}

#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) */

int fio_monotonic_clocktest(int debug)
{
	if (debug)
		log_info("cs: current platform does not support CPU clocks\n");
	return 1;
}

#endif