summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJens Axboe <axboe@fb.com>2014-12-17 08:51:23 -0700
committerJens Axboe <axboe@fb.com>2014-12-17 08:51:23 -0700
commit73df3e072e210c93934d21668cc8adba4ae74d77 (patch)
tree813777193a77cc5a98c2700f9cc222fa3ec5b634
parentbf2921f02651653c6fd6794449b725b138f2611c (diff)
downloadfio-73df3e072e210c93934d21668cc8adba4ae74d77.tar.gz
fio-73df3e072e210c93934d21668cc8adba4ae74d77.tar.bz2
gettime: offset CPU cycle counter by initial value
This should then be safe for the full 2^64 cycle range, and it pushes the more expensive variable division to later in the run (which in practice probably never happens). Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--gettime.c15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/gettime.c b/gettime.c
index 7e3e2e5b..a95b5452 100644
--- a/gettime.c
+++ b/gettime.c
@@ -17,6 +17,7 @@
static unsigned long cycles_per_usec;
static unsigned long inv_cycles_per_usec;
static uint64_t max_cycles_for_mult;
+static unsigned long long cycles_start, cycles_wrap;
#endif
int tsc_reliable = 0;
@@ -169,12 +170,16 @@ static void __fio_gettime(struct timeval *tp)
uint64_t usecs, t;
t = get_cpu_clock();
- if (t < tv->last_cycles && tv->last_tv_valid &&
- !tv->warned) {
- log_err("fio: CPU clock going back in time\n");
- tv->warned = 1;
+ if (t < cycles_start && !cycles_wrap)
+ cycles_wrap = 1;
+ else if (cycles_wrap && t >= cycles_start) {
+ if (!tv->warned) {
+ log_err("fio: double CPU clock wrap\n");
+ tv->warned = 1;
+ }
}
+ t -= cycles_start;
tv->last_cycles = t;
tv->last_tv_valid = 1;
#ifdef ARCH_CPU_CLOCK_CYCLES_PER_USEC
@@ -299,6 +304,8 @@ static int calibrate_cpu_clock(void)
inv_cycles_per_usec = 16777216UL / cycles_per_usec;
max_cycles_for_mult = ~0ULL / inv_cycles_per_usec;
dprint(FD_TIME, "inv_cycles_per_usec=%lu\n", inv_cycles_per_usec);
+ cycles_start = get_cpu_clock();
+ dprint(FD_TIME, "cycles_start=%llu\n", cycles_start);
return 0;
}
#else