#include "hash.h"
#include "os/os.h"
-#if defined(ARCH_HAVE_CPU_CLOCK) && !defined(ARCH_CPU_CLOCK_CYCLES_PER_USEC)
+#if defined(ARCH_HAVE_CPU_CLOCK)
+#ifndef ARCH_CPU_CLOCK_CYCLES_PER_USEC
static unsigned long cycles_per_usec;
static unsigned long inv_cycles_per_usec;
static uint64_t max_cycles_for_mult;
+#endif
+#ifdef ARCH_CPU_CLOCK_WRAPS
static unsigned long long cycles_start, cycles_wrap;
#endif
+#endif
int tsc_reliable = 0;
struct tv_valid {
#ifdef CONFIG_CLOCK_GETTIME
static int fill_clock_gettime(struct timespec *ts)
{
-#ifdef CONFIG_CLOCK_MONOTONIC
+#if defined(CONFIG_CLOCK_MONOTONIC_RAW)
+ return clock_gettime(CLOCK_MONOTONIC_RAW, ts);
+#elif defined(CONFIG_CLOCK_MONOTONIC)
return clock_gettime(CLOCK_MONOTONIC, ts);
#else
return clock_gettime(CLOCK_REALTIME, ts);
#endif
t = get_cpu_clock();
+#ifdef ARCH_CPU_CLOCK_WRAPS
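+	/*
+	 * On clock sources that can wrap, readings are taken relative to
+	 * cycles_start (sampled during calibration); a value below
+	 * cycles_start latches cycles_wrap so the wrap can be flagged
+	 * only once (tv->warned).
+	 */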
if (t < cycles_start && !cycles_wrap)
cycles_wrap = 1;
else if (cycles_wrap && t >= cycles_start && !tv->warned) {
}
t -= cycles_start;
+#endif
tv->last_cycles = t;
tv->last_tv_valid = 1;
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
struct timeval s, e;
uint64_t c_s, c_e;
enum fio_cs old_cs = fio_clock_source;
+ uint64_t elapsed;
#ifdef CONFIG_CLOCK_GETTIME
fio_clock_source = CS_CGETTIME;
c_s = get_cpu_clock();
do {
- uint64_t elapsed;
-
__fio_gettime(&e);
elapsed = utime_since(&s, &e);
} while (1);
fio_clock_source = old_cs;
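+	/* cycles per elapsed microsecond over the timed interval */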
- return (c_e - c_s + 127) >> 7;
+ return (c_e - c_s) / elapsed;
}
#define NR_TIME_ITERS 50
static int calibrate_cpu_clock(void)
{
double delta, mean, S;
- uint64_t avg, cycles[NR_TIME_ITERS];
+ uint64_t minc, maxc, avg, cycles[NR_TIME_ITERS];
int i, samples;
cycles[0] = get_cycles_per_usec();
S = sqrt(S / (NR_TIME_ITERS - 1.0));
- samples = avg = 0;
+ minc = -1ULL;
+ maxc = samples = avg = 0;
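+	/*
+	 * Average only the samples that fall within one standard deviation
+	 * of the mean; min/max of all samples are kept for the debug log.
+	 */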
for (i = 0; i < NR_TIME_ITERS; i++) {
double this = cycles[i];
+ minc = min(cycles[i], minc);
+ maxc = max(cycles[i], maxc);
+
if ((fmax(this, mean) - fmin(this, mean)) > S)
continue;
samples++;
}
S /= (double) NR_TIME_ITERS;
- mean /= 10.0;
for (i = 0; i < NR_TIME_ITERS; i++)
- dprint(FD_TIME, "cycles[%d]=%llu\n", i,
- (unsigned long long) cycles[i] / 10);
+ dprint(FD_TIME, "cycles[%d]=%llu\n", i, (unsigned long long) cycles[i]);
avg /= samples;
- avg = (avg + 5) / 10;
dprint(FD_TIME, "avg: %llu\n", (unsigned long long) avg);
- dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);
+ dprint(FD_TIME, "min=%llu, max=%llu, mean=%f, S=%f\n",
+ (unsigned long long) minc,
+ (unsigned long long) maxc, mean, S);
cycles_per_usec = avg;
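+	/*
+	 * 16777216 = 2^24: inv_cycles_per_usec is a 24-bit fixed-point
+	 * reciprocal of cycles_per_usec, and max_cycles_for_mult bounds the
+	 * cycle counts for which that multiply still fits in 64 bits (the
+	 * conversion path is assumed to fall back to a plain divide by
+	 * cycles_per_usec above that limit).
+	 */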
inv_cycles_per_usec = 16777216UL / cycles_per_usec;
max_cycles_for_mult = ~0ULL / inv_cycles_per_usec;
dprint(FD_TIME, "inv_cycles_per_usec=%lu\n", inv_cycles_per_usec);
+#ifdef ARCH_CPU_CLOCK_WRAPS
cycles_start = get_cpu_clock();
dprint(FD_TIME, "cycles_start=%llu\n", cycles_start);
+#endif
return 0;
}
#else
* runs at a constant rate and is synced across CPU cores.
*/
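+	/*
+	 * Even a reliable TSC is only auto-selected after a quick
+	 * monotonicity pass: fio_monotonic_clocktest() returns 0 when no
+	 * out-of-order TSC samples are seen across CPUs.
+	 */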
if (tsc_reliable) {
- if (!fio_clock_source_set)
+ if (!fio_clock_source_set && !fio_monotonic_clocktest(0))
fio_clock_source = CS_CPUCLOCK;
} else if (fio_clock_source == CS_CPUCLOCK)
log_info("fio: clocksource=cpu may not be reliable\n");
#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
defined(CONFIG_SFAA)
-#define CLOCK_ENTRIES 100000
+#define CLOCK_ENTRIES_DEBUG 100000
+#define CLOCK_ENTRIES_TEST 10000
struct clock_entry {
uint32_t seq;
struct clock_thread {
pthread_t thread;
int cpu;
+ int debug;
pthread_mutex_t lock;
pthread_mutex_t started;
+ unsigned long nr_entries;
uint32_t *seq;
struct clock_entry *entries;
};
struct clock_entry *c;
os_cpu_mask_t cpu_mask;
uint32_t last_seq;
+ unsigned long long first;
int i;
- memset(&cpu_mask, 0, sizeof(cpu_mask));
+ if (fio_cpuset_init(&cpu_mask)) {
+ int __err = errno;
+
+ log_err("clock cpuset init failed: %s\n", strerror(__err));
+ goto err_out;
+ }
+
fio_cpu_set(&cpu_mask, t->cpu);
if (fio_setaffinity(gettid(), cpu_mask) == -1) {
- log_err("clock setaffinity failed\n");
- return (void *) 1;
+ int __err = errno;
+
+ log_err("clock setaffinity failed: %s\n", strerror(__err));
+ goto err;
}
pthread_mutex_lock(&t->lock);
pthread_mutex_unlock(&t->started);
+ first = get_cpu_clock();
last_seq = 0;
c = &t->entries[0];
- for (i = 0; i < CLOCK_ENTRIES; i++, c++) {
+ for (i = 0; i < t->nr_entries; i++, c++) {
uint32_t seq;
uint64_t tsc;
c->tsc = tsc;
}
- log_info("cs: cpu%3d: %llu clocks seen\n", t->cpu,
- (unsigned long long) t->entries[i - 1].tsc - t->entries[0].tsc);
+ if (t->debug) {
+ unsigned long long clocks;
+
+ clocks = t->entries[i - 1].tsc - t->entries[0].tsc;
+ log_info("cs: cpu%3d: %llu clocks seen, first %llu\n", t->cpu,
+ clocks, first);
+ }
/*
* The most common platform clock breakage is returning zero
* indefinitely. Check for that and return failure.
*/
if (!t->entries[i - 1].tsc && !t->entries[0].tsc)
- return (void *) 1;
+ goto err;
+ fio_cpuset_exit(&cpu_mask);
return NULL;
+err:
+ fio_cpuset_exit(&cpu_mask);
+err_out:
+ return (void *) 1;
}
static int clock_cmp(const void *p1, const void *p2)
return c1->seq - c2->seq;
}
-int fio_monotonic_clocktest(void)
+int fio_monotonic_clocktest(int debug)
{
struct clock_thread *cthreads;
unsigned int nr_cpus = cpus_online();
struct clock_entry *entries;
- unsigned long tentries, failed = 0;
+ unsigned long nr_entries, tentries, failed = 0;
struct clock_entry *prev, *this;
uint32_t seq = 0;
unsigned int i;
- log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");
+ if (debug) {
+ log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");
#ifdef FIO_INC_DEBUG
- fio_debug |= 1U << FD_TIME;
+ fio_debug |= 1U << FD_TIME;
#endif
+ nr_entries = CLOCK_ENTRIES_DEBUG;
+ } else
+ nr_entries = CLOCK_ENTRIES_TEST;
+
calibrate_cpu_clock();
+
+ if (debug) {
#ifdef FIO_INC_DEBUG
- fio_debug &= ~(1U << FD_TIME);
+ fio_debug &= ~(1U << FD_TIME);
#endif
+ }
cthreads = malloc(nr_cpus * sizeof(struct clock_thread));
- tentries = CLOCK_ENTRIES * nr_cpus;
+ tentries = nr_entries * nr_cpus;
entries = malloc(tentries * sizeof(struct clock_entry));
- log_info("cs: Testing %u CPUs\n", nr_cpus);
+ if (debug)
+ log_info("cs: Testing %u CPUs\n", nr_cpus);
for (i = 0; i < nr_cpus; i++) {
struct clock_thread *t = &cthreads[i];
t->cpu = i;
+ t->debug = debug;
t->seq = &seq;
- t->entries = &entries[i * CLOCK_ENTRIES];
+ t->nr_entries = nr_entries;
+ t->entries = &entries[i * nr_entries];
pthread_mutex_init(&t->lock, NULL);
pthread_mutex_init(&t->started, NULL);
pthread_mutex_lock(&t->lock);
free(cthreads);
if (failed) {
- log_err("Clocksource test: %lu threads failed\n", failed);
+ if (debug)
+ log_err("Clocksource test: %lu threads failed\n", failed);
goto err;
}
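+	/*
+	 * Sort all samples by the global acquisition sequence; any TSC that
+	 * then goes backwards from one entry to the next means the CPU
+	 * clocks are not synchronized across cores.
+	 */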
qsort(entries, tentries, sizeof(struct clock_entry), clock_cmp);
+ /* silence silly gcc */
+ prev = NULL;
for (failed = i = 0; i < tentries; i++) {
this = &entries[i];
if (prev->tsc > this->tsc) {
uint64_t diff = prev->tsc - this->tsc;
+ if (!debug) {
+ failed++;
+ break;
+ }
+
log_info("cs: CPU clock mismatch (diff=%llu):\n",
(unsigned long long) diff);
log_info("\t CPU%3u: TSC=%llu, SEQ=%u\n", prev->cpu, (unsigned long long) prev->tsc, prev->seq);
prev = this;
}
- if (failed)
- log_info("cs: Failed: %lu\n", failed);
- else
- log_info("cs: Pass!\n");
-
+ if (debug) {
+ if (failed)
+ log_info("cs: Failed: %lu\n", failed);
+ else
+ log_info("cs: Pass!\n");
+ }
err:
free(entries);
return !!failed;
#else /* defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && defined(CONFIG_SFAA) */
-int fio_monotonic_clocktest(void)
+int fio_monotonic_clocktest(int debug)
{
- log_info("cs: current platform does not support CPU clocks\n");
- return 0;
+ if (debug)
+ log_info("cs: current platform does not support CPU clocks\n");
+ return 1;
}
#endif