Commit | Line | Data |
---|---|---|
02bcaa8c | 1 | /* |
f5cc024a | 2 | * Clock functions |
02bcaa8c | 3 | */ |
f5cc024a | 4 | |
02bcaa8c | 5 | #include <unistd.h> |
c223da83 | 6 | #include <math.h> |
02bcaa8c | 7 | #include <sys/time.h> |
03e20d68 | 8 | #include <time.h> |
02bcaa8c JA |
9 | |
10 | #include "fio.h" | |
be4ecfdf | 11 | #include "smalloc.h" |
02bcaa8c JA |
12 | |
13 | #include "hash.h" | |
7d11f871 | 14 | #include "os/os.h" |
02bcaa8c | 15 | |
/*
 * TSC calibration state: only needed when the arch has a CPU clock but
 * does not hard-code its rate at build time.
 */
#if defined(ARCH_HAVE_CPU_CLOCK) && !defined(ARCH_CPU_CLOCK_CYCLES_PER_USEC)
static unsigned long cycles_per_usec;
/* 2^24-scaled reciprocal of cycles_per_usec; turns the per-sample
 * division into a multiply + shift (see __fio_gettime()) */
static unsigned long inv_cycles_per_usec;
#endif
/* Non-zero when the arch reports the TSC is good enough to be THE clock */
int tsc_reliable = 0;

/* Per-thread record of the last CPU-clock sample, used to detect the
 * clock going backwards */
struct tv_valid {
	uint64_t last_cycles;
	uint64_t last_tv_valid;
};
#ifdef CONFIG_TLS_THREAD
static __thread struct tv_valid static_tv_valid;
#else
static pthread_key_t tv_tls_key;
#endif

/* Active clock source; fio_clock_source_set is non-zero when the user
 * explicitly chose one (so we must not override it) */
enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
static enum fio_cs fio_clock_source_inited = CS_INVAL;
#ifdef FIO_DEBUG_TIME

/* Hash table of fio_gettime() call sites, keyed by return address */
#define HASH_BITS	8
#define HASH_SIZE	(1 << HASH_BITS)

static struct flist_head hash[HASH_SIZE];
static int gtod_inited;

/* One entry per distinct caller: how many times it asked for the time */
struct gtod_log {
	struct flist_head list;
	void *caller;
	unsigned long calls;
};
49 | ||
50 | static struct gtod_log *find_hash(void *caller) | |
51 | { | |
52 | unsigned long h = hash_ptr(caller, HASH_BITS); | |
01743ee1 | 53 | struct flist_head *entry; |
02bcaa8c | 54 | |
01743ee1 JA |
55 | flist_for_each(entry, &hash[h]) { |
56 | struct gtod_log *log = flist_entry(entry, struct gtod_log, | |
57 | list); | |
02bcaa8c JA |
58 | |
59 | if (log->caller == caller) | |
60 | return log; | |
61 | } | |
62 | ||
63 | return NULL; | |
64 | } | |
65 | ||
66 | static struct gtod_log *find_log(void *caller) | |
67 | { | |
68 | struct gtod_log *log = find_hash(caller); | |
69 | ||
70 | if (!log) { | |
71 | unsigned long h; | |
72 | ||
73 | log = malloc(sizeof(*log)); | |
01743ee1 | 74 | INIT_FLIST_HEAD(&log->list); |
02bcaa8c JA |
75 | log->caller = caller; |
76 | log->calls = 0; | |
77 | ||
78 | h = hash_ptr(caller, HASH_BITS); | |
01743ee1 | 79 | flist_add_tail(&log->list, &hash[h]); |
02bcaa8c JA |
80 | } |
81 | ||
82 | return log; | |
83 | } | |
84 | ||
85 | static void gtod_log_caller(void *caller) | |
86 | { | |
87 | if (gtod_inited) { | |
88 | struct gtod_log *log = find_log(caller); | |
89 | ||
90 | log->calls++; | |
91 | } | |
92 | } | |
93 | ||
94 | static void fio_exit fio_dump_gtod(void) | |
95 | { | |
96 | unsigned long total_calls = 0; | |
97 | int i; | |
98 | ||
99 | for (i = 0; i < HASH_SIZE; i++) { | |
01743ee1 | 100 | struct flist_head *entry; |
02bcaa8c JA |
101 | struct gtod_log *log; |
102 | ||
01743ee1 JA |
103 | flist_for_each(entry, &hash[i]) { |
104 | log = flist_entry(entry, struct gtod_log, list); | |
02bcaa8c | 105 | |
5ec10eaa JA |
106 | printf("function %p, calls %lu\n", log->caller, |
107 | log->calls); | |
02bcaa8c JA |
108 | total_calls += log->calls; |
109 | } | |
110 | } | |
111 | ||
112 | printf("Total %lu gettimeofday\n", total_calls); | |
113 | } | |
114 | ||
/*
 * Set up the caller hash buckets before any call-site logging happens.
 */
static void fio_init gtod_init(void)
{
	int i;

	for (i = 0; i < HASH_SIZE; i++)
		INIT_FLIST_HEAD(&hash[i]);

	gtod_inited = 1;
}
124 | ||
125 | #endif /* FIO_DEBUG_TIME */ | |
126 | ||
#ifdef CONFIG_CLOCK_GETTIME
/*
 * Read the preferred POSIX clock: monotonic when available, otherwise
 * fall back to the realtime clock.
 */
static int fill_clock_gettime(struct timespec *ts)
{
#ifdef CONFIG_CLOCK_MONOTONIC
	const clockid_t clk = CLOCK_MONOTONIC;
#else
	const clockid_t clk = CLOCK_REALTIME;
#endif

	return clock_gettime(clk, ts);
}
#endif
67bf9823 | 137 | |
/*
 * Fill *tp from the currently selected clock source. CPU-clock reads
 * also sanity-check that this thread's TSC did not go backwards.
 */
static void __fio_gettime(struct timeval *tp)
{
	struct tv_valid *tv;

#ifdef CONFIG_TLS_THREAD
	tv = &static_tv_valid;
#else
	tv = pthread_getspecific(tv_tls_key);
#endif

	switch (fio_clock_source) {
#ifdef CONFIG_GETTIMEOFDAY
	case CS_GTOD:
		gettimeofday(tp, NULL);
		break;
#endif
#ifdef CONFIG_CLOCK_GETTIME
	case CS_CGETTIME: {
		struct timespec ts;

		if (fill_clock_gettime(&ts) < 0) {
			log_err("fio: clock_gettime fails\n");
			assert(0);
		}

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		break;
	}
#endif
#ifdef ARCH_HAVE_CPU_CLOCK
	case CS_CPUCLOCK: {
		uint64_t usecs, t;

		t = get_cpu_clock();
		/* warn if the TSC moved backwards for this thread */
		if (t < tv->last_cycles && tv->last_tv_valid)
			log_err("fio: CPU clock going back in time\n");

		tv->last_cycles = t;
		tv->last_tv_valid = 1;
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
		usecs = t / ARCH_CPU_CLOCK_CYCLES_PER_USEC;
#else
		/* multiply by 2^24/cycles_per_usec, then shift out the 2^24 */
		usecs = (t * inv_cycles_per_usec) / 16777216UL;
#endif
		tp->tv_sec = usecs / 1000000;
		tp->tv_usec = usecs % 1000000;
		break;
	}
#endif
	default:
		log_err("fio: invalid clock source %d\n", fio_clock_source);
		break;
	}
}
193 | ||
#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timeval *tp, void *caller)
#else
void fio_gettime(struct timeval *tp, void fio_unused *caller)
#endif
{
#ifdef FIO_DEBUG_TIME
	if (!caller)
		caller = __builtin_return_address(0);

	gtod_log_caller(caller);
#endif
	/*
	 * NOTE(review): fio_tv is defined elsewhere — presumably a shared
	 * timeval maintained by a gtod offload thread; confirm. When set,
	 * we copy it instead of reading a clock ourselves.
	 */
	if (fio_unlikely(fio_tv)) {
		memcpy(tp, fio_tv, sizeof(*tp));
		return;
	}

	__fio_gettime(tp);
}
be4ecfdf | 213 | |
#if defined(ARCH_HAVE_CPU_CLOCK) && !defined(ARCH_CPU_CLOCK_CYCLES_PER_USEC)
/*
 * Measure the CPU clock against a wall-clock source over a ~1280 usec
 * window. Returns elapsed cycles divided by 128, i.e. cycles per 10 usec
 * for the nominal window (callers later divide by 10).
 */
static unsigned long get_cycles_per_usec(void)
{
	struct timeval s, e;
	uint64_t c_s, c_e;
	enum fio_cs old_cs = fio_clock_source;

	/* calibrate against the best non-TSC source available */
#ifdef CONFIG_CLOCK_GETTIME
	fio_clock_source = CS_CGETTIME;
#else
	fio_clock_source = CS_GTOD;
#endif
	__fio_gettime(&s);

	c_s = get_cpu_clock();
	do {
		uint64_t elapsed;

		__fio_gettime(&e);

		elapsed = utime_since(&s, &e);
		if (elapsed >= 1280) {
			c_e = get_cpu_clock();
			break;
		}
	} while (1);

	fio_clock_source = old_cs;
	/* round, then divide the cycle delta by 128 */
	return (c_e - c_s + 127) >> 7;
}
244 | ||
#define NR_TIME_ITERS	50

/*
 * Calibrate cycles_per_usec by sampling get_cycles_per_usec() repeatedly
 * and averaging the samples that fall within one standard deviation of
 * the running mean. Returns 0 on success, 1 if the CPU clock looks
 * broken (stuck at zero).
 */
static int calibrate_cpu_clock(void)
{
	double delta, mean, S;
	uint64_t avg, cycles[NR_TIME_ITERS];
	int i, samples;

	/* warm-up read; the loop below overwrites cycles[0] */
	cycles[0] = get_cycles_per_usec();
	S = delta = mean = 0.0;
	/* Welford-style running mean and variance accumulation */
	for (i = 0; i < NR_TIME_ITERS; i++) {
		cycles[i] = get_cycles_per_usec();
		delta = cycles[i] - mean;
		if (delta) {
			mean += delta / (i + 1.0);
			S += delta * (cycles[i] - mean);
		}
	}

	/*
	 * The most common platform clock breakage is returning zero
	 * indefinitely. Check for that and return failure.
	 */
	if (!cycles[0] && !cycles[NR_TIME_ITERS - 1])
		return 1;

	S = sqrt(S / (NR_TIME_ITERS - 1.0));

	/* average only samples within one stddev of the mean */
	samples = avg = 0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		double this = cycles[i];

		if ((fmax(this, mean) - fmin(this, mean)) > S)
			continue;
		samples++;
		avg += this;
	}

	S /= (double) NR_TIME_ITERS;
	/* samples are in cycles per 10 usec; scale down to per-usec */
	mean /= 10.0;

	for (i = 0; i < NR_TIME_ITERS; i++)
		dprint(FD_TIME, "cycles[%d]=%llu\n", i,
					(unsigned long long) cycles[i] / 10);

	avg /= samples;
	avg = (avg + 5) / 10;	/* round to nearest cycles/usec */
	dprint(FD_TIME, "avg: %llu\n", (unsigned long long) avg);
	dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);

	cycles_per_usec = avg;
	/* 2^24-scaled reciprocal used by the fast path in __fio_gettime() */
	inv_cycles_per_usec = 16777216UL / cycles_per_usec;
	dprint(FD_TIME, "inv_cycles_per_usec=%lu\n", inv_cycles_per_usec);
	return 0;
}
#else
/*
 * No runtime calibration possible: succeed only if the cycle rate is
 * known at build time, otherwise report the CPU clock as unusable.
 */
static int calibrate_cpu_clock(void)
{
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
	return 0;
#else
	return 1;
#endif
}
#endif // ARCH_HAVE_CPU_CLOCK
09a32402 | 310 | |
67bf9823 | 311 | #ifndef CONFIG_TLS_THREAD |
5d879392 JA |
312 | void fio_local_clock_init(int is_thread) |
313 | { | |
314 | struct tv_valid *t; | |
315 | ||
572cfb3f | 316 | t = calloc(1, sizeof(*t)); |
9eb271b9 | 317 | if (pthread_setspecific(tv_tls_key, t)) { |
5d879392 | 318 | log_err("fio: can't set TLS key\n"); |
9eb271b9 JA |
319 | assert(0); |
320 | } | |
5d879392 JA |
321 | } |
322 | ||
323 | static void kill_tv_tls_key(void *data) | |
324 | { | |
325 | free(data); | |
326 | } | |
67bf9823 JA |
327 | #else |
328 | void fio_local_clock_init(int is_thread) | |
329 | { | |
330 | } | |
331 | #endif | |
5d879392 | 332 | |
/*
 * Initialize the selected clock source: create the TLS key (non-TLS
 * builds), calibrate the CPU clock, and pick/validate the source based
 * on TSC reliability. Re-runs only when the configured source changed.
 */
void fio_clock_init(void)
{
	if (fio_clock_source == fio_clock_source_inited)
		return;

#ifndef CONFIG_TLS_THREAD
	if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
		log_err("fio: can't create TLS key\n");
#endif

	fio_clock_source_inited = fio_clock_source;

	if (calibrate_cpu_clock())
		tsc_reliable = 0;

	/*
	 * If the arch sets tsc_reliable != 0, then it must be good enough
	 * to use as THE clock source. For x86 CPUs, this means the TSC
	 * runs at a constant rate and is synced across CPU cores.
	 */
	if (tsc_reliable) {
		if (!fio_clock_source_set)
			fio_clock_source = CS_CPUCLOCK;
	} else if (fio_clock_source == CS_CPUCLOCK)
		log_info("fio: clocksource=cpu may not be reliable\n");
}
359 | ||
/*
 * Microseconds elapsed from *s to *e; 0 if time appears to go backwards.
 */
uint64_t utime_since(const struct timeval *s, const struct timeval *e)
{
	long sec = e->tv_sec - s->tv_sec;
	long usec = e->tv_usec - s->tv_usec;

	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	return sec * 1000000ULL + usec;
}
382 | ||
/* Microseconds elapsed from *s to now */
uint64_t utime_since_now(const struct timeval *s)
{
	struct timeval t;

	fio_gettime(&t, NULL);
	return utime_since(s, &t);
}
783a3eb1 | 390 | |
/*
 * Milliseconds elapsed from *s to *e; 0 if time appears to go backwards.
 */
uint64_t mtime_since(const struct timeval *s, const struct timeval *e)
{
	long sec = e->tv_sec - s->tv_sec;
	long usec = e->tv_usec - s->tv_usec;

	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	/* truncate the sub-second part to whole milliseconds */
	return sec * 1000L + usec / 1000L;
}
411 | ||
/*
 * Milliseconds elapsed from *s to now; passes our caller's address so
 * FIO_DEBUG_TIME accounting attributes the gtod call correctly.
 */
uint64_t mtime_since_now(const struct timeval *s)
{
	struct timeval t;
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
	return mtime_since(s, &t);
}
783a3eb1 | 420 | |
/* Seconds elapsed from *s to now (truncated) */
uint64_t time_since_now(const struct timeval *s)
{
	return mtime_since_now(s) / 1000;
}
7d11f871 | 425 | |
#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
    defined(CONFIG_SFAA)

#define CLOCK_ENTRIES	100000

/* One TSC sample: global sequence number, sampling CPU, clock value */
struct clock_entry {
	uint32_t seq;
	uint32_t cpu;
	uint64_t tsc;
};

/* Per-CPU test thread context */
struct clock_thread {
	pthread_t thread;
	int cpu;
	pthread_mutex_t lock;		/* released by main to start the run */
	pthread_mutex_t started;	/* released by thread once pinned */
	uint32_t *seq;			/* shared global sequence counter */
	struct clock_entry *entries;	/* this thread's sample slots */
};
445 | ||
/* Atomically bump *seq and return the post-increment value */
static inline uint32_t atomic32_inc_return(uint32_t *seq)
{
	return __sync_add_and_fetch(seq, 1);
}
450 | ||
/*
 * Per-CPU worker for the clock test: pin to t->cpu, wait for the start
 * signal, then record CLOCK_ENTRIES (seq, tsc) pairs where seq comes
 * from the globally shared atomic counter. Returns non-NULL on failure.
 */
static void *clock_thread_fn(void *data)
{
	struct clock_thread *t = data;
	struct clock_entry *c;
	os_cpu_mask_t cpu_mask;
	uint32_t last_seq;
	int i;

	memset(&cpu_mask, 0, sizeof(cpu_mask));
	fio_cpu_set(&cpu_mask, t->cpu);

	if (fio_setaffinity(gettid(), cpu_mask) == -1) {
		log_err("clock setaffinity failed\n");
		return (void *) 1;
	}

	/* tell main we're pinned, then block until all threads released */
	pthread_mutex_lock(&t->lock);
	pthread_mutex_unlock(&t->started);

	last_seq = 0;
	c = &t->entries[0];
	for (i = 0; i < CLOCK_ENTRIES; i++, c++) {
		uint32_t seq;
		uint64_t tsc;

		c->cpu = t->cpu;
		/*
		 * Retry until the TSC read happens with no intervening
		 * increment of the shared counter, so seq order matches
		 * tsc order. NOTE(review): last_seq is never updated after
		 * init, so the 'seq < last_seq' break looks unreachable —
		 * presumably a wraparound guard; confirm intent.
		 */
		do {
			seq = atomic32_inc_return(t->seq);
			if (seq < last_seq)
				break;
			tsc = get_cpu_clock();
		} while (seq != *t->seq);

		c->seq = seq;
		c->tsc = tsc;
	}

	log_info("cs: cpu%3d: %llu clocks seen\n", t->cpu,
		(unsigned long long) t->entries[i - 1].tsc - t->entries[0].tsc);

	/*
	 * The most common platform clock breakage is returning zero
	 * indefinitely. Check for that and return failure.
	 */
	if (!t->entries[i - 1].tsc && !t->entries[0].tsc)
		return (void *) 1;

	return NULL;
}
500 | ||
501 | static int clock_cmp(const void *p1, const void *p2) | |
502 | { | |
503 | const struct clock_entry *c1 = p1; | |
504 | const struct clock_entry *c2 = p2; | |
505 | ||
b9b3498e JA |
506 | if (c1->seq == c2->seq) |
507 | log_err("cs: bug in atomic sequence!\n"); | |
508 | ||
7d11f871 JA |
509 | return c1->seq - c2->seq; |
510 | } | |
511 | ||
/*
 * Cross-CPU TSC monotonicity test: one thread per online CPU records
 * (seq, tsc) samples; after a global sort by seq, any tsc that goes
 * backwards between consecutive samples is a clock mismatch. Returns
 * non-zero on failure.
 */
int fio_monotonic_clocktest(void)
{
	struct clock_thread *cthreads;
	unsigned int nr_cpus = cpus_online();
	struct clock_entry *entries;
	unsigned long tentries, failed = 0;
	struct clock_entry *prev, *this;
	uint32_t seq = 0;
	unsigned int i;

	log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");

	/* temporarily enable FD_TIME debug so calibration details print */
#ifdef FIO_INC_DEBUG
	fio_debug |= 1U << FD_TIME;
#endif
	calibrate_cpu_clock();
#ifdef FIO_INC_DEBUG
	fio_debug &= ~(1U << FD_TIME);
#endif

	/* NOTE(review): both mallocs are unchecked — OOM derefs NULL below */
	cthreads = malloc(nr_cpus * sizeof(struct clock_thread));
	tentries = CLOCK_ENTRIES * nr_cpus;
	entries = malloc(tentries * sizeof(struct clock_entry));

	log_info("cs: Testing %u CPUs\n", nr_cpus);

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		t->cpu = i;
		t->seq = &seq;
		t->entries = &entries[i * CLOCK_ENTRIES];
		pthread_mutex_init(&t->lock, NULL);
		pthread_mutex_init(&t->started, NULL);
		pthread_mutex_lock(&t->lock);
		/* on create failure, only join the threads already started */
		if (pthread_create(&t->thread, NULL, clock_thread_fn, t)) {
			failed++;
			nr_cpus = i;
			break;
		}
	}

	/* wait until every thread is pinned to its CPU */
	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		pthread_mutex_lock(&t->started);
	}

	/* release all threads to start sampling simultaneously */
	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];

		pthread_mutex_unlock(&t->lock);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &cthreads[i];
		void *ret;

		pthread_join(t->thread, &ret);
		if (ret)
			failed++;
	}
	free(cthreads);

	if (failed) {
		log_err("Clocksource test: %lu threads failed\n", failed);
		goto err;
	}

	/* order all samples globally by sequence number */
	qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);

	for (failed = i = 0; i < tentries; i++) {
		this = &entries[i];

		if (!i) {
			prev = this;
			continue;
		}

		if (prev->tsc > this->tsc) {
			uint64_t diff = prev->tsc - this->tsc;

			log_info("cs: CPU clock mismatch (diff=%llu):\n",
						(unsigned long long) diff);
			log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", prev->cpu, (unsigned long long) prev->tsc, prev->seq);
			log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", this->cpu, (unsigned long long) this->tsc, this->seq);
			failed++;
		}

		prev = this;
	}

	if (failed)
		log_info("cs: Failed: %lu\n", failed);
	else
		log_info("cs: Pass!\n");

err:
	free(entries);
	return !!failed;
}
613 | ||
#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) */

/* Platform cannot run the TSC test; report success so callers proceed */
int fio_monotonic_clocktest(void)
{
	log_info("cs: current platform does not support CPU clocks\n");
	return 0;
}

#endif