summaryrefslogtreecommitdiff
path: root/gettime.c
diff options
context:
space:
mode:
authorJens Axboe <axboe@kernel.dk>2012-12-09 20:29:00 +0100
committerJens Axboe <axboe@kernel.dk>2012-12-09 20:29:00 +0100
commitfa80feae51331fb170e784459fa1359d7ec3a963 (patch)
treeaa23a345eb99081d44b1afb92bf60c7d4d6eec10 /gettime.c
parent5a90bb5f50f641a65f879ae09dbd65440e0ab2a6 (diff)
downloadfio-fa80feae51331fb170e784459fa1359d7ec3a963.tar.gz
fio-fa80feae51331fb170e784459fa1359d7ec3a963.tar.bz2
Add check for invariant TSC on x86 and use TSC as default clock if reliable
TSC is by far the fastest clock we can use. Check the CPUID bits for whether it is both constant rate AND synced across cores. If it is, we can use it as our default clock source. Fio will default to this clock source on x86 if no other clock source is specifically given with clocksource= in the job file. Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'gettime.c')
-rw-r--r--gettime.c30
1 file changed, 22 insertions, 8 deletions
diff --git a/gettime.c b/gettime.c
index 35d685e1..89a3a016 100644
--- a/gettime.c
+++ b/gettime.c
@@ -15,11 +15,13 @@
#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned long cycles_per_usec;
static unsigned long last_cycles;
+int tsc_reliable = 0;
#endif
static struct timeval last_tv;
static int last_tv_valid;
enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
+int fio_clock_source_set = 0;
#ifdef FIO_DEBUG_TIME
@@ -208,15 +210,17 @@ static unsigned long get_cycles_per_usec(void)
return c_e - c_s;
}
+#define NR_TIME_ITERS 50
+
static void calibrate_cpu_clock(void)
{
double delta, mean, S;
- unsigned long avg, cycles[10];
+ unsigned long avg, cycles[NR_TIME_ITERS];
int i, samples;
cycles[0] = get_cycles_per_usec();
S = delta = mean = 0.0;
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < NR_TIME_ITERS; i++) {
cycles[i] = get_cycles_per_usec();
delta = cycles[i] - mean;
if (delta) {
@@ -225,10 +229,10 @@ static void calibrate_cpu_clock(void)
}
}
- S = sqrt(S / (10 - 1.0));
+ S = sqrt(S / (NR_TIME_ITERS - 1.0));
samples = avg = 0;
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < NR_TIME_ITERS; i++) {
double this = cycles[i];
if ((fmax(this, mean) - fmin(this, mean)) > S)
@@ -237,10 +241,10 @@ static void calibrate_cpu_clock(void)
avg += this;
}
- S /= 10.0;
- mean /= 10.0;
+ S /= (double) NR_TIME_ITERS;
+ mean /= (double) NR_TIME_ITERS;
- for (i = 0; i < 10; i++)
+ for (i = 0; i < NR_TIME_ITERS; i++)
dprint(FD_TIME, "cycles[%d]=%lu\n", i, cycles[i] / 10);
avg /= (samples * 10);
@@ -248,7 +252,6 @@ static void calibrate_cpu_clock(void)
dprint(FD_TIME, "mean=%f, S=%f\n", mean, S);
cycles_per_usec = avg;
-
}
#else
static void calibrate_cpu_clock(void)
@@ -260,6 +263,17 @@ void fio_clock_init(void)
{
last_tv_valid = 0;
calibrate_cpu_clock();
+
+ /*
+ * If the arch sets tsc_reliable != 0, then it must be good enough
+ * to use as THE clock source. For x86 CPUs, this means the TSC
+ * runs at a constant rate and is synced across CPU cores.
+ */
+ if (tsc_reliable) {
+ if (!fio_clock_source_set)
+ fio_clock_source = CS_CPUCLOCK;
+ } else if (fio_clock_source == CS_CPUCLOCK)
+ log_info("fio: clocksource=cpu may not be reliable\n");
}
unsigned long long utime_since(struct timeval *s, struct timeval *e)