#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned long cycles_per_usec;
static unsigned long last_cycles;
+int tsc_reliable = 0;
#endif
static struct timeval last_tv;
static int last_tv_valid;
-static struct timeval *fio_tv;
-int fio_gtod_offload = 0;
-int fio_gtod_cpu = -1;
-
-enum fio_cs fio_clock_source = CS_GTOD;
+enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
+int fio_clock_source_set = 0;
#ifdef FIO_DEBUG_TIME
return c_e - c_s;
}
+/*
+ * Number of get_cycles_per_usec() samples taken when calibrating the
+ * CPU clock (raised from the previous hard-coded 10).
+ */
+#define NR_TIME_ITERS	50
+
+/*
+ * Estimate cycles_per_usec for the CPU-clock source: take NR_TIME_ITERS
+ * samples of get_cycles_per_usec(), compute their mean and deviation S,
+ * then average the samples filtered against S.
+ * NOTE(review): several context lines (mean/S accumulation, the
+ * continue/samples++ in the filter loop) are elided from this hunk, so
+ * the full sampling logic is not visible here — confirm against the
+ * complete file.
+ */
 static void calibrate_cpu_clock(void)
 {
 	double delta, mean, S;
-	unsigned long avg, cycles[10];
+	unsigned long avg, cycles[NR_TIME_ITERS];
 	int i, samples;
 	cycles[0] = get_cycles_per_usec();
 	S = delta = mean = 0.0;
-	for (i = 0; i < 10; i++) {
+	for (i = 0; i < NR_TIME_ITERS; i++) {
 		cycles[i] = get_cycles_per_usec();
 		delta = cycles[i] - mean;
 		if (delta) {
 		}
 	}
-	S = sqrt(S / (10 - 1.0));
+	S = sqrt(S / (NR_TIME_ITERS - 1.0));
 	samples = avg = 0;
-	for (i = 0; i < 10; i++) {
+	for (i = 0; i < NR_TIME_ITERS; i++) {
 		double this = cycles[i];
 		if ((fmax(this, mean) - fmin(this, mean)) > S)
 			avg += this;
 	}
-	S /= 10.0;
-	mean /= 10.0;
+	S /= (double) NR_TIME_ITERS;
+	mean /= (double) NR_TIME_ITERS;
-	for (i = 0; i < 10; i++)
+	for (i = 0; i < NR_TIME_ITERS; i++)
 		dprint(FD_TIME, "cycles[%d]=%lu\n", i, cycles[i] / 10);
 	avg /= (samples * 10);
 	dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);
 	cycles_per_usec = avg;
-
 }
#else
static void calibrate_cpu_clock(void)
{
last_tv_valid = 0;
calibrate_cpu_clock();
+
+ /*
+ * If the arch sets tsc_reliable != 0, then it must be good enough
+ * to use as THE clock source. For x86 CPUs, this means the TSC
+ * runs at a constant rate and is synced across CPU cores.
+ */
+ if (tsc_reliable) {
+ if (!fio_clock_source_set)
+ fio_clock_source = CS_CPUCLOCK;
+ } else if (fio_clock_source == CS_CPUCLOCK)
+ log_info("fio: clocksource=cpu may not be reliable\n");
+}
+
+/*
+ * Return the elapsed time between timevals *s (start) and *e (end) in
+ * microseconds.  Returns 0 if *e appears to lie before *s, guarding
+ * against clocks that step backwards.
+ */
+unsigned long long utime_since(struct timeval *s, struct timeval *e)
+{
+	long sec, usec;
+	unsigned long long ret;
+
+	sec = e->tv_sec - s->tv_sec;
+	usec = e->tv_usec - s->tv_usec;
+	if (sec > 0 && usec < 0) {
+		/* borrow one second so usec becomes non-negative */
+		sec--;
+		usec += 1000000;
+	}
+
+	/*
+	 * time warp bug on some kernels?
+	 */
+	if (sec < 0 || (sec == 0 && usec < 0))
+		return 0;
+
+	ret = sec * 1000000ULL + usec;
+
+	return ret;
+}
+
+/*
+ * Microseconds elapsed from *s until the current time, as read via
+ * fio_gettime().
+ */
+unsigned long long utime_since_now(struct timeval *s)
+{
+	struct timeval t;
+
+	fio_gettime(&t, NULL);
+	return utime_since(s, &t);
+}
+
+/*
+ * Return the elapsed time between *s and *e in milliseconds (partial
+ * milliseconds are truncated).  Returns 0 if *e appears to lie before
+ * *s, mirroring the time-warp clamp in utime_since().
+ */
+unsigned long mtime_since(struct timeval *s, struct timeval *e)
+{
+	long sec, usec, ret;
+
+	sec = e->tv_sec - s->tv_sec;
+	usec = e->tv_usec - s->tv_usec;
+	if (sec > 0 && usec < 0) {
+		/* borrow one second so usec becomes non-negative */
+		sec--;
+		usec += 1000000;
+	}
+
+	if (sec < 0 || (sec == 0 && usec < 0))
+		return 0;
+
+	sec *= 1000UL;
+	usec /= 1000UL;
+	ret = sec + usec;
+
+	return ret;
+}
-void fio_gtod_init(void)
+/*
+ * Milliseconds elapsed from *s until now.  Passes the caller's return
+ * address as fio_gettime()'s second argument — presumably for time
+ * debugging/attribution; confirm against fio_gettime()'s contract.
+ */
+unsigned long mtime_since_now(struct timeval *s)
 {
-	fio_tv = smalloc(sizeof(struct timeval));
-	assert(fio_tv);
+	struct timeval t;
+	void *p = __builtin_return_address(0);
+
+	fio_gettime(&t, p);
+	return mtime_since(s, &t);
 }
-void fio_gtod_update(void)
+/*
+ * Whole seconds elapsed from *s until now (truncated, via the
+ * millisecond helper).
+ */
+unsigned long time_since_now(struct timeval *s)
 {
-	gettimeofday(fio_tv, NULL);
+	return mtime_since_now(s) / 1000;
 }