#if defined(ARCH_HAVE_CPU_CLOCK)
#ifndef ARCH_CPU_CLOCK_CYCLES_PER_USEC
-static unsigned long cycles_per_msec;
+static unsigned long long cycles_per_msec;
static unsigned long long cycles_start;
static unsigned long long clock_mult;
static unsigned long long max_cycles_mask;
static unsigned int cycles_wrap;
#endif
#endif
-int tsc_reliable = 0;
+bool tsc_reliable = false;
struct tv_valid {
int warned;
(unsigned long long) maxc, mean, S);
max_ticks = MAX_CLOCK_SEC * cycles_per_msec * 1000ULL;
- max_mult = ULLONG_MAX / max_ticks;
- dprint(FD_TIME, "\n\nmax_ticks=%llu, __builtin_clzll=%d, max_mult=%llu\n",
- max_ticks, __builtin_clzll(max_ticks), max_mult);
+ max_mult = ULLONG_MAX / max_ticks;
+ dprint(FD_TIME, "\n\nmax_ticks=%llu, __builtin_clzll=%d, "
+ "max_mult=%llu\n", max_ticks,
+ __builtin_clzll(max_ticks), max_mult);
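+	/*
+	 * max_mult, computed above, is the largest multiplier that keeps
+	 * a 64-bit product from overflowing for max_ticks clock ticks
+	 */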
/*
* Find the largest shift count that will produce
dprint(FD_TIME, "tmp=%llu, sft=%u\n", tmp, sft);
}
- clock_shift = sft;
- clock_mult = (1ULL << sft) * 1000000 / cycles_per_msec;
- dprint(FD_TIME, "clock_shift=%u, clock_mult=%llu\n", clock_shift, clock_mult);
+ clock_shift = sft;
+ clock_mult = (1ULL << sft) * 1000000 / cycles_per_msec;
+ dprint(FD_TIME, "clock_shift=%u, clock_mult=%llu\n", clock_shift,
+ clock_mult);
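+	/*
+	 * Illustrative numbers only (hypothetical 3 GHz TSC):
+	 * cycles_per_msec = 3000000, and with sft = 32 this gives
+	 * clock_mult = (1ULL << 32) * 1000000 / 3000000 = 1431655765,
+	 * so (cycles * clock_mult) >> 32 ~= cycles / 3 nsecs
+	 */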
- // Find the greatest power of 2 clock ticks that is less than the ticks in MAX_CLOCK_SEC_2STAGE
+	/*
+	 * Find the greatest power of 2 clock ticks that is less than the
+	 * number of ticks in MAX_CLOCK_SEC_2STAGE seconds
+	 */
max_cycles_shift = max_cycles_mask = 0;
tmp = MAX_CLOCK_SEC * 1000ULL * cycles_per_msec;
- dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp, max_cycles_shift);
+ dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp,
+ max_cycles_shift);
while (tmp > 1) {
tmp >>= 1;
max_cycles_shift++;
dprint(FD_TIME, "tmp=%llu, max_cycles_shift=%u\n", tmp, max_cycles_shift);
}
- // if use use (1ULL << max_cycles_shift) * 1000 / cycles_per_msec here we will
- // have a discontinuity every (1ULL << max_cycles_shift) cycles
- nsecs_for_max_cycles = ((1ULL << max_cycles_shift) * clock_mult) >> clock_shift;
+	/*
+	 * If we use (1ULL << max_cycles_shift) * 1000 / cycles_per_msec
+	 * here, we will have a discontinuity every
+	 * (1ULL << max_cycles_shift) cycles
+	 */
+ nsecs_for_max_cycles = ((1ULL << max_cycles_shift) * clock_mult)
+ >> clock_shift;
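+	/*
+	 * Deriving it from clock_mult and clock_shift keeps the per-wrap
+	 * increment identical to what the conversion above yields for
+	 * (1ULL << max_cycles_shift) cycles, so timestamps stay smooth
+	 * across a wrap
+	 */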
- // Use a bitmask to calculate ticks % (1ULL << max_cycles_shift)
+ /* Use a bitmask to calculate ticks % (1ULL << max_cycles_shift) */
for (tmp = 0; tmp < max_cycles_shift; tmp++)
max_cycles_mask |= 1ULL << tmp;
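+	/*
+	 * e.g. an illustrative max_cycles_shift of 10 gives a mask of
+	 * 0x3ff, so (ticks & max_cycles_mask) == ticks % 1024
+	 */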
- dprint(FD_TIME, "max_cycles_shift=%u, 2^max_cycles_shift=%llu, nsecs_for_max_cycles=%llu, max_cycles_mask=%016llx\n",
- max_cycles_shift, (1ULL << max_cycles_shift),
- nsecs_for_max_cycles, max_cycles_mask);
+ dprint(FD_TIME, "max_cycles_shift=%u, 2^max_cycles_shift=%llu, "
+ "nsecs_for_max_cycles=%llu, "
+ "max_cycles_mask=%016llx\n",
+ max_cycles_shift, (1ULL << max_cycles_shift),
+ nsecs_for_max_cycles, max_cycles_mask);
cycles_start = get_cpu_clock();
dprint(FD_TIME, "cycles_start=%llu\n", cycles_start);
fio_clock_source_inited = fio_clock_source;
if (calibrate_cpu_clock())
- tsc_reliable = 0;
+ tsc_reliable = false;
/*
* If the arch sets tsc_reliable != 0, then it must be good enough
fio_clock_source = CS_CPUCLOCK;
} else if (fio_clock_source == CS_CPUCLOCK)
log_info("fio: clocksource=cpu may not be reliable\n");
+ dprint(FD_TIME, "gettime: clocksource=%d\n", (int) fio_clock_source);
}
uint64_t ntime_since(const struct timespec *s, const struct timespec *e)
return nsec + (sec * 1000000000LL);
}
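+/*
+ * Nanoseconds elapsed between *s and the current time, as sampled by
+ * fio_gettime()
+ */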
+uint64_t ntime_since_now(const struct timespec *s)
+{
+ struct timespec now;
+
+ fio_gettime(&now, NULL);
+ return ntime_since(s, &now);
+}
+
uint64_t utime_since(const struct timespec *s, const struct timespec *e)
{
int64_t sec, usec;
}
#if defined(FIO_HAVE_CPU_AFFINITY) && defined(ARCH_HAVE_CPU_CLOCK) && \
- defined(CONFIG_SFAA)
+ defined(CONFIG_SYNC_SYNC) && defined(CONFIG_CMP_SWAP)
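+/*
+ * The clock test below relies on __sync_synchronize() and
+ * __sync_val_compare_and_swap(), hence the two config guards above
+ */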
#define CLOCK_ENTRIES_DEBUG 100000
-#define CLOCK_ENTRIES_TEST 10000
+#define CLOCK_ENTRIES_TEST 1000
struct clock_entry {
uint32_t seq;
struct clock_entry *entries;
};
-static inline uint32_t atomic32_inc_return(uint32_t *seq)
+static inline uint32_t atomic32_compare_and_swap(uint32_t *ptr, uint32_t old,
+ uint32_t new)
{
- return 1 + __sync_fetch_and_add(seq, 1);
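+	/*
+	 * Returns the value *ptr held before the swap; the CAS succeeded
+	 * iff that value equals old
+	 */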
+ return __sync_val_compare_and_swap(ptr, old, new);
}
static void *clock_thread_fn(void *data)
struct clock_thread *t = data;
struct clock_entry *c;
os_cpu_mask_t cpu_mask;
- uint32_t last_seq;
unsigned long long first;
int i;
pthread_mutex_unlock(&t->started);
first = get_cpu_clock();
- last_seq = 0;
c = &t->entries[0];
for (i = 0; i < t->nr_entries; i++, c++) {
uint32_t seq;
c->cpu = t->cpu;
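+		/*
+		 * Snapshot the shared sequence, read the TSC after a full
+		 * barrier, then try to claim the sequence number with a
+		 * compare-and-swap. Retry if another thread claimed it
+		 * first, so each logged (seq, tsc) pair is unique and
+		 * globally ordered. UINT_MAX acts as a stop sentinel.
+		 */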
do {
- seq = atomic32_inc_return(t->seq);
- if (seq < last_seq)
+ seq = *t->seq;
+ if (seq == UINT_MAX)
break;
+ __sync_synchronize();
tsc = get_cpu_clock();
- } while (seq != *t->seq);
+ } while (seq != atomic32_compare_and_swap(t->seq, seq, seq + 1));
+
+ if (seq == UINT_MAX)
+ break;
c->seq = seq;
c->tsc = tsc;
* The most common platform clock breakage is returning zero
* indefinitely. Check for that and return failure.
*/
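+	/*
+	 * i may be 0 or 1 if this thread stopped early on the sentinel,
+	 * so guard the entries[i - 1] access below
+	 */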
- if (!t->entries[i - 1].tsc && !t->entries[0].tsc)
+ if (i > 1 && !t->entries[i - 1].tsc && !t->entries[0].tsc)
goto err;
fio_cpuset_exit(&cpu_mask);