int last_tv_valid;
};
#ifdef CONFIG_TLS_THREAD
-static struct tv_valid __thread static_tv_valid;
+static __thread struct tv_valid static_tv_valid;
#else
static pthread_key_t tv_tls_key;
#endif
enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
-enum fio_cs fio_clock_source_inited = CS_INVAL;
+static enum fio_cs fio_clock_source_inited = CS_INVAL;
#ifdef FIO_DEBUG_TIME
gtod_log_caller(caller);
#endif
- if (fio_tv) {
+ if (fio_unlikely(fio_tv)) {
memcpy(tp, fio_tv, sizeof(*tp));
return;
}
mean /= 10.0;
for (i = 0; i < NR_TIME_ITERS; i++)
- dprint(FD_TIME, "cycles[%d]=%lu\n", i, cycles[i] / 10);
+ dprint(FD_TIME, "cycles[%d]=%llu\n", i,
+ (unsigned long long) cycles[i] / 10);
avg /= samples;
avg = (avg + 5) / 10;
- dprint(FD_TIME, "avg: %lu\n", avg);
+ dprint(FD_TIME, "avg: %llu\n", (unsigned long long) avg);
dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);
cycles_per_usec = avg;
{
	struct tv_valid *t;

	/* calloc takes (count, size): one zeroed per-thread tv_valid */
	t = calloc(1, sizeof(*t));
	/*
	 * NOTE(review): a NULL return from calloc is not checked here;
	 * pthread_setspecific(key, NULL) would succeed and later readers
	 * would dereference NULL — confirm whether callers tolerate that.
	 */
	if (pthread_setspecific(tv_tls_key, t))
		log_err("fio: can't set TLS key\n");
}
/* Number of samples each clock test thread records. */
#define CLOCK_ENTRIES	100000

/*
 * One sample taken by a clock test thread: the global sequence number it
 * drew, the CPU it ran on, and the raw cycle counter at that point.
 * seq and cpu are 32-bit so the pair packs into one 64-bit word, keeping
 * the entry at 16 bytes instead of 24.
 */
struct clock_entry {
	uint32_t seq;	/* global ticket from atomic32_inc_return() */
	uint32_t cpu;	/* CPU this sample was taken on */
	uint64_t tsc;	/* cycle counter value at sample time */
};
/*
 * Per-thread state for the clocksource test. The main thread holds 'lock'
 * until all workers are staged; 'started' is released by the worker once
 * it is pinned and ready. 'seq' points at the shared 32-bit sequence
 * counter (matching clock_entry.seq) and 'entries' at this thread's
 * CLOCK_ENTRIES sample buffer.
 */
struct clock_thread {
	int cpu;			/* CPU this worker is pinned to */
	pthread_mutex_t lock;		/* start gate, held by the parent */
	pthread_mutex_t started;	/* worker signals readiness */
	uint32_t *seq;			/* shared global sequence counter */
	struct clock_entry *entries;	/* this thread's sample buffer */
};
/*
 * Atomically increment *seq and return the post-increment value.
 * __sync_fetch_and_add() returns the pre-increment value, hence the +1.
 * 32-bit to match clock_entry.seq / clock_thread.seq.
 */
static inline uint32_t atomic32_inc_return(uint32_t *seq)
{
	return 1 + __sync_fetch_and_add(seq, 1);
}
struct clock_thread *t = data;
struct clock_entry *c;
os_cpu_mask_t cpu_mask;
+ uint32_t last_seq;
int i;
memset(&cpu_mask, 0, sizeof(cpu_mask));
pthread_mutex_lock(&t->lock);
pthread_mutex_unlock(&t->started);
+ last_seq = 0;
c = &t->entries[0];
for (i = 0; i < CLOCK_ENTRIES; i++, c++) {
- uint64_t seq, tsc;
+ uint32_t seq;
+ uint64_t tsc;
c->cpu = t->cpu;
do {
- seq = atomic64_inc_return(t->seq);
+ seq = atomic32_inc_return(t->seq);
+ if (seq < last_seq)
+ break;
tsc = get_cpu_clock();
} while (seq != *t->seq);
c->tsc = tsc;
}
- log_info("cs: cpu%3d: %lu clocks seen\n", t->cpu, t->entries[CLOCK_ENTRIES - 1].tsc - t->entries[0].tsc);
+ log_info("cs: cpu%3d: %llu clocks seen\n", t->cpu,
+ (unsigned long long) t->entries[i - 1].tsc - t->entries[0].tsc);
+
/*
* The most common platform clock breakage is returning zero
* indefinitely. Check for that and return failure.
*/
- if (!t->entries[CLOCK_ENTRIES - 1].tsc && !t->entries[0].tsc)
+ if (!t->entries[i - 1].tsc && !t->entries[0].tsc)
return (void *) 1;
return NULL;
struct clock_entry *entries;
unsigned long tentries, failed;
struct clock_entry *prev, *this;
- uint64_t seq = 0;
+ uint32_t seq = 0;
int i;
log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");
free(threads);
if (failed) {
- log_err("Clocksource test: %u threads failed\n", failed);
+ log_err("Clocksource test: %lu threads failed\n", failed);
goto err;
}
if (prev->tsc > this->tsc) {
uint64_t diff = prev->tsc - this->tsc;
- log_info("cs: CPU clock mismatch (diff=%lu):\n", diff);
- log_info("\t CPU%3lu: TSC=%lu, SEQ=%lu\n", prev->cpu, prev->tsc, prev->seq);
- log_info("\t CPU%3lu: TSC=%lu, SEQ=%lu\n", this->cpu, this->tsc, this->seq);
+ log_info("cs: CPU clock mismatch (diff=%llu):\n",
+ (unsigned long long) diff);
+ log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", prev->cpu, (unsigned long long) prev->tsc, prev->seq);
+ log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", this->cpu, (unsigned long long) this->tsc, this->seq);
failed++;
}