/*
 * Clock functions
 */

#include <unistd.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>

#include "fio.h"
#include "smalloc.h"

#include "hash.h"
#include "os/os.h"

#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned long cycles_per_usec;
int tsc_reliable = 0;
#endif

struct tv_valid {
	struct timeval last_tv;
	int last_tv_valid;
	unsigned long last_cycles;
};
static pthread_key_t tv_tls_key;

enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
enum fio_cs fio_clock_source_inited = CS_INVAL;

#ifdef FIO_DEBUG_TIME

#define HASH_BITS	8
#define HASH_SIZE	(1 << HASH_BITS)

static struct flist_head hash[HASH_SIZE];
static int gtod_inited;

struct gtod_log {
	struct flist_head list;
	void *caller;
	unsigned long calls;
};

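/*
 * Look up the gettimeofday() logging entry for a given call site.
 * Returns NULL if this caller has not been seen yet.
 */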
static struct gtod_log *find_hash(void *caller)
{
	unsigned long h = hash_ptr(caller, HASH_BITS);
	struct flist_head *entry;

	flist_for_each(entry, &hash[h]) {
		struct gtod_log *log = flist_entry(entry, struct gtod_log,
								list);

		if (log->caller == caller)
			return log;
	}

	return NULL;
}

static struct gtod_log *find_log(void *caller)
{
	struct gtod_log *log = find_hash(caller);

	if (!log) {
		unsigned long h;

		log = malloc(sizeof(*log));
		INIT_FLIST_HEAD(&log->list);
		log->caller = caller;
		log->calls = 0;

		h = hash_ptr(caller, HASH_BITS);
		flist_add_tail(&log->list, &hash[h]);
	}

	return log;
}

static void gtod_log_caller(void *caller)
{
	if (gtod_inited) {
		struct gtod_log *log = find_log(caller);

		log->calls++;
	}
}

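/*
 * Exit handler: dump the per-caller gettimeofday() call counts that were
 * collected while FIO_DEBUG_TIME was enabled.
 */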
static void fio_exit fio_dump_gtod(void)
{
	unsigned long total_calls = 0;
	int i;

	for (i = 0; i < HASH_SIZE; i++) {
		struct flist_head *entry;
		struct gtod_log *log;

		flist_for_each(entry, &hash[i]) {
			log = flist_entry(entry, struct gtod_log, list);

			printf("function %p, calls %lu\n", log->caller,
								log->calls);
			total_calls += log->calls;
		}
	}

	printf("Total %lu gettimeofday\n", total_calls);
}

static void fio_init gtod_init(void)
{
	int i;

	for (i = 0; i < HASH_SIZE; i++)
		INIT_FLIST_HEAD(&hash[i]);

	gtod_inited = 1;
}

#endif /* FIO_DEBUG_TIME */

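/*
 * Fill in the current time, using whichever clock source is configured.
 * If an external time source (fio_tv) is set, that value wins. The
 * per-thread tv_valid state is used to clamp the result so time never
 * appears to move backwards within a thread.
 */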
#ifdef FIO_DEBUG_TIME
void fio_gettime(struct timeval *tp, void *caller)
#else
void fio_gettime(struct timeval *tp, void fio_unused *caller)
#endif
{
	struct tv_valid *tv;

#ifdef FIO_DEBUG_TIME
	if (!caller)
		caller = __builtin_return_address(0);

	gtod_log_caller(caller);
#endif
	if (fio_tv) {
		memcpy(tp, fio_tv, sizeof(*tp));
		return;
	}

	tv = pthread_getspecific(tv_tls_key);

	switch (fio_clock_source) {
	case CS_GTOD:
		gettimeofday(tp, NULL);
		break;
	case CS_CGETTIME: {
		struct timespec ts;

#ifdef FIO_HAVE_CLOCK_MONOTONIC
		if (clock_gettime(CLOCK_MONOTONIC, &ts) < 0) {
#else
		if (clock_gettime(CLOCK_REALTIME, &ts) < 0) {
#endif
			log_err("fio: clock_gettime fails\n");
			assert(0);
		}

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		break;
		}
#ifdef ARCH_HAVE_CPU_CLOCK
	case CS_CPUCLOCK: {
		unsigned long long usecs, t;

		t = get_cpu_clock();
		if (tv && t < tv->last_cycles) {
			dprint(FD_TIME, "CPU clock going back in time\n");
			t = tv->last_cycles;
		} else if (tv)
			tv->last_cycles = t;

		usecs = t / cycles_per_usec;
		tp->tv_sec = usecs / 1000000;
		tp->tv_usec = usecs % 1000000;
		break;
		}
#endif
	default:
		log_err("fio: invalid clock source %d\n", fio_clock_source);
		break;
	}

	/*
	 * If Linux is using the tsc clock on non-synced processors,
	 * sometimes time can appear to drift backwards. Fix that up.
	 */
	if (tv) {
		if (tv->last_tv_valid) {
			if (tp->tv_sec < tv->last_tv.tv_sec)
				tp->tv_sec = tv->last_tv.tv_sec;
			else if (tv->last_tv.tv_sec == tp->tv_sec &&
				 tp->tv_usec < tv->last_tv.tv_usec)
				tp->tv_usec = tv->last_tv.tv_usec;
		}
		tv->last_tv_valid = 1;
		memcpy(&tv->last_tv, tp, sizeof(*tp));
	}
}

#ifdef ARCH_HAVE_CPU_CLOCK
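/*
 * Sample the CPU clock against gettimeofday() over a window of at least
 * 1280 usec and return the elapsed cycle count divided by 128, rounded
 * up. For a 1280 usec window that is roughly ten times the true
 * cycles-per-usec value; calibrate_cpu_clock() divides the averaged
 * result by 10 to compensate.
 */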
static unsigned long get_cycles_per_usec(void)
{
	struct timeval s, e;
	unsigned long long c_s, c_e;

	gettimeofday(&s, NULL);
	c_s = get_cpu_clock();
	do {
		unsigned long long elapsed;

		gettimeofday(&e, NULL);
		elapsed = utime_since(&s, &e);
		if (elapsed >= 1280) {
			c_e = get_cpu_clock();
			break;
		}
	} while (1);

	return (c_e - c_s + 127) >> 7;
}

#define NR_TIME_ITERS	50

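/*
 * Calibrate cycles_per_usec: take NR_TIME_ITERS samples (the first call
 * before the loop is a warm-up whose result is overwritten), compute the
 * mean and standard deviation incrementally (Welford's method), discard
 * samples more than one standard deviation from the mean, and average
 * what remains. The final divide by 10 undoes the x10 fixed-point
 * scaling of get_cycles_per_usec().
 */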
static void calibrate_cpu_clock(void)
{
	double delta, mean, S;
	unsigned long avg, cycles[NR_TIME_ITERS];
	int i, samples;

	cycles[0] = get_cycles_per_usec();
	S = delta = mean = 0.0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		cycles[i] = get_cycles_per_usec();
		delta = cycles[i] - mean;
		if (delta) {
			mean += delta / (i + 1.0);
			S += delta * (cycles[i] - mean);
		}
	}

	S = sqrt(S / (NR_TIME_ITERS - 1.0));

	samples = avg = 0;
	for (i = 0; i < NR_TIME_ITERS; i++) {
		double this = cycles[i];

		if ((fmax(this, mean) - fmin(this, mean)) > S)
			continue;
		samples++;
		avg += this;
	}

	S /= (double) NR_TIME_ITERS;
	mean /= 10.0;

	for (i = 0; i < NR_TIME_ITERS; i++)
		dprint(FD_TIME, "cycles[%d]=%lu\n", i, cycles[i] / 10);

	avg /= samples;
	avg = (avg + 9) / 10;
	dprint(FD_TIME, "avg: %lu\n", avg);
	dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);

	cycles_per_usec = avg;
}
#else
static void calibrate_cpu_clock(void)
{
}
#endif

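/*
 * Per-thread clock state setup: every thread gets its own tv_valid, so
 * the backwards-time clamping in fio_gettime() is thread-local.
 */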
void fio_local_clock_init(int is_thread)
{
	struct tv_valid *t;

	/* calloc() takes the element count first, then the element size */
	t = calloc(1, sizeof(*t));
	if (pthread_setspecific(tv_tls_key, t))
		log_err("fio: can't set TLS key\n");
}

static void kill_tv_tls_key(void *data)
{
	free(data);
}

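/*
 * (Re)initialize the clock source: create the TLS key and calibrate the
 * CPU clock. If the arch reports a reliable TSC and the user did not
 * explicitly pick a clock source, prefer the CPU clock.
 */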
void fio_clock_init(void)
{
	if (fio_clock_source == fio_clock_source_inited)
		return;

	if (pthread_key_create(&tv_tls_key, kill_tv_tls_key))
		log_err("fio: can't create TLS key\n");

	fio_clock_source_inited = fio_clock_source;
	calibrate_cpu_clock();

	/*
	 * If the arch sets tsc_reliable != 0, then it must be good enough
	 * to use as THE clock source. For x86 CPUs, this means the TSC
	 * runs at a constant rate and is synced across CPU cores.
	 */
	if (tsc_reliable) {
		if (!fio_clock_source_set)
			fio_clock_source = CS_CPUCLOCK;
	} else if (fio_clock_source == CS_CPUCLOCK)
		log_info("fio: clocksource=cpu may not be reliable\n");
}

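/*
 * Microseconds between two timevals, clamped to 0 if the end time is
 * (apparently) before the start time.
 */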
unsigned long long utime_since(struct timeval *s, struct timeval *e)
{
	long sec, usec;
	unsigned long long ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	/*
	 * time warp bug on some kernels?
	 */
	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	ret = sec * 1000000ULL + usec;

	return ret;
}

unsigned long long utime_since_now(struct timeval *s)
{
	struct timeval t;

	fio_gettime(&t, NULL);
	return utime_since(s, &t);
}

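/*
 * Milliseconds between two timevals, with the same backwards-time
 * clamping as utime_since().
 */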
unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
	long sec, usec, ret;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	if (sec < 0 || (sec == 0 && usec < 0))
		return 0;

	sec *= 1000UL;
	usec /= 1000UL;
	ret = sec + usec;

	return ret;
}

unsigned long mtime_since_now(struct timeval *s)
{
	struct timeval t;
	void *p = __builtin_return_address(0);

	fio_gettime(&t, p);
	return mtime_since(s, &t);
}

unsigned long time_since_now(struct timeval *s)
{
	return mtime_since_now(s) / 1000;
}

#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK)

#define CLOCK_ENTRIES	100000

struct clock_entry {
	unsigned long seq;
	unsigned long tsc;
	unsigned long cpu;
};

struct clock_thread {
	pthread_t thread;
	int cpu;
	pthread_mutex_t lock;
	pthread_mutex_t started;
	uint64_t *seq;
	struct clock_entry *entries;
};

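/*
 * Atomically increment *seq and return the post-increment value, using
 * the GCC __sync builtin.
 */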
static inline uint64_t atomic64_inc_return(uint64_t *seq)
{
	return 1 + __sync_fetch_and_add(seq, 1);
}

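/*
 * Per-CPU test thread: pin to one CPU, wait for the go-ahead from the
 * main thread, then record CLOCK_ENTRIES (sequence, TSC) pairs. The
 * seq/tsc retry loop discards a sample whenever another CPU incremented
 * the shared sequence in between, so that a global sort by sequence
 * number reflects the order in which the TSC was read across CPUs.
 */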
static void *clock_thread_fn(void *data)
{
	struct clock_thread *t = data;
	struct clock_entry *c;
	os_cpu_mask_t cpu_mask;
	int i;

	memset(&cpu_mask, 0, sizeof(cpu_mask));
	fio_cpu_set(&cpu_mask, t->cpu);

	if (fio_setaffinity(gettid(), cpu_mask) == -1) {
		log_err("clock setaffinity failed\n");
		/*
		 * Release ->started even on failure, so the main thread
		 * cannot block forever waiting for this thread to check in.
		 */
		pthread_mutex_unlock(&t->started);
		return (void *) 1;
	}

	/*
	 * Signal the main thread that we are pinned, then wait for it to
	 * drop ->lock so that all threads start sampling at the same time.
	 * ->started is used semaphore-style here: the main thread locked
	 * it before creating us, and unlocking it from this thread relies
	 * on the default (non error-checking) mutex behavior.
	 */
	pthread_mutex_unlock(&t->started);
	pthread_mutex_lock(&t->lock);

	c = &t->entries[0];
	for (i = 0; i < CLOCK_ENTRIES; i++, c++) {
		uint64_t seq, tsc;

		c->cpu = t->cpu;
		do {
			seq = atomic64_inc_return(t->seq);
			tsc = get_cpu_clock();
		} while (seq != *t->seq);

		c->seq = seq;
		c->tsc = tsc;
	}

	log_info("cs: cpu%3d: %lu clocks seen\n", t->cpu,
			t->entries[CLOCK_ENTRIES - 1].tsc - t->entries[0].tsc);
	return NULL;
}

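/*
 * qsort() comparator: order clock entries by their atomic sequence
 * number. Equal sequence numbers should be impossible.
 */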
static int clock_cmp(const void *p1, const void *p2)
{
	const struct clock_entry *c1 = p1;
	const struct clock_entry *c2 = p2;

	if (c1->seq == c2->seq)
		log_err("cs: bug in atomic sequence!\n");

	/*
	 * Compare instead of subtracting: ->seq is an unsigned long, and
	 * the difference of two of them can overflow the int return value.
	 */
	return (c1->seq > c2->seq) - (c1->seq < c2->seq);
}

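/*
 * Verify that the CPU clock is monotonic across all online CPUs: run one
 * sampling thread per CPU, merge and sort the samples by sequence number,
 * and flag any case where a later sample has an earlier TSC. Returns
 * non-zero on failure.
 */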
int fio_monotonic_clocktest(void)
{
	struct clock_thread *threads;
	unsigned int nr_cpus = cpus_online();
	struct clock_entry *entries, *prev;
	unsigned long tentries, failed;
	uint64_t seq = 0;
	int i;

	fio_debug |= 1U << FD_TIME;
	calibrate_cpu_clock();
	fio_debug &= ~(1U << FD_TIME);

	threads = malloc(nr_cpus * sizeof(struct clock_thread));
	tentries = CLOCK_ENTRIES * nr_cpus;
	entries = malloc(tentries * sizeof(struct clock_entry));

	log_info("cs: Testing %u CPUs\n", nr_cpus);

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];

		t->cpu = i;
		t->seq = &seq;
		t->entries = &entries[i * CLOCK_ENTRIES];
		pthread_mutex_init(&t->lock, NULL);
		pthread_mutex_init(&t->started, NULL);
		pthread_mutex_lock(&t->lock);
		/*
		 * Hold ->started too, so the wait loop below really blocks
		 * until the thread has pinned itself and released it.
		 */
		pthread_mutex_lock(&t->started);
		pthread_create(&t->thread, NULL, clock_thread_fn, t);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];

		pthread_mutex_lock(&t->started);
	}

	for (i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];

		pthread_mutex_unlock(&t->lock);
	}

	for (failed = i = 0; i < nr_cpus; i++) {
		struct clock_thread *t = &threads[i];
		void *ret;

		pthread_join(t->thread, &ret);
		if (ret)
			failed++;
	}
	free(threads);

	if (failed) {
		log_err("Clocksource test: %lu threads failed\n", failed);
		goto err;
	}

	qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);

	/*
	 * prev must live across loop iterations; the old per-iteration
	 * declaration left it uninitialized after the first "continue".
	 */
	prev = NULL;
	for (failed = i = 0; i < tentries; i++) {
		struct clock_entry *this = &entries[i];

		if (!prev) {
			prev = this;
			continue;
		}

		if (prev->tsc > this->tsc) {
			uint64_t diff = prev->tsc - this->tsc;

			log_info("cs: CPU clock mismatch (diff=%lu):\n", diff);
			log_info("\t CPU%3lu: TSC=%lu, SEQ=%lu\n",
					prev->cpu, prev->tsc, prev->seq);
			log_info("\t CPU%3lu: TSC=%lu, SEQ=%lu\n",
					this->cpu, this->tsc, this->seq);
			failed++;
		}

		prev = this;
	}

	if (failed)
		log_info("cs: Failed: %lu\n", failed);
	else
		log_info("cs: Pass!\n");

err:
	free(entries);
	return !!failed;
}

#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) */

int fio_monotonic_clocktest(void)
{
	log_info("cs: current platform does not support CPU clocks\n");
	return 0;
}

#endif