author		Jens Axboe <axboe@kernel.dk>	2013-02-25 10:23:58 +0100
committer	Jens Axboe <axboe@kernel.dk>	2013-02-25 10:23:58 +0100
commit		58002f9af5a2bdf3f20c7f427b62202c2564de2e (patch)
tree		58c9378d8fc7e1ce420cca72706f76915b81e6a5 /gettime.c
parent		4b91ee8fd12c72bd76ce9f5ff9116626b48566a0 (diff)
download	fio-58002f9af5a2bdf3f20c7f427b62202c2564de2e.tar.gz
		fio-58002f9af5a2bdf3f20c7f427b62202c2564de2e.tar.bz2
gettime: use 32-bit atomic sequences
Not all platforms have a 64-bit wide atomic __sync_fetch_and_add(). If we
just check for overflow, it should be OK to use a 32-bit sequence number.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
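To see the idea in isolation, here is a minimal standalone sketch (not fio code), assuming GCC/clang's __sync builtins: bump a 32-bit counter atomically and treat the value running backwards as the overflow signal, which is exactly the check this patch adds.

/*
 * Standalone sketch of the commit's idea, not fio's actual code.
 * A 32-bit atomic sequence where wraparound is detected by the
 * returned value going backwards. Requires GCC/clang __sync builtins.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t atomic32_inc_return(uint32_t *seq)
{
	/* __sync_fetch_and_add() returns the old value, so add 1 */
	return 1 + __sync_fetch_and_add(seq, 1);
}

int main(void)
{
	uint32_t seq = UINT32_MAX - 2;	/* start near the top to force a wrap */
	uint32_t last_seq = 0, cur;
	int i;

	for (i = 0; i < 5; i++) {
		cur = atomic32_inc_return(&seq);
		if (cur < last_seq) {	/* went backwards: 32 bits overflowed */
			printf("seq wrapped: %u -> %u, stop sampling\n", last_seq, cur);
			break;
		}
		last_seq = cur;
	}
	return 0;
}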
Diffstat (limited to 'gettime.c')
-rw-r--r--	gettime.c	24
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/gettime.c b/gettime.c
index 352c1d3d..660ba4c8 100644
--- a/gettime.c
+++ b/gettime.c
@@ -441,9 +441,9 @@ uint64_t time_since_now(struct timeval *s)
 #define CLOCK_ENTRIES	100000
 
 struct clock_entry {
-	uint64_t seq;
+	uint32_t seq;
+	uint32_t cpu;
 	uint64_t tsc;
-	uint64_t cpu;
 };
 
 struct clock_thread {
@@ -451,11 +451,11 @@ struct clock_thread {
 	int cpu;
 	pthread_mutex_t lock;
 	pthread_mutex_t started;
-	uint64_t *seq;
+	uint32_t *seq;
 	struct clock_entry *entries;
 };
 
-static inline uint64_t atomic64_inc_return(uint64_t *seq)
+static inline uint32_t atomic32_inc_return(uint32_t *seq)
 {
 	return 1 + __sync_fetch_and_add(seq, 1);
 }
@@ -465,6 +465,7 @@ static void *clock_thread_fn(void *data)
 	struct clock_thread *t = data;
 	struct clock_entry *c;
 	os_cpu_mask_t cpu_mask;
+	uint32_t last_seq;
 	int i;
 
 	memset(&cpu_mask, 0, sizeof(cpu_mask));
@@ -478,13 +479,17 @@ static void *clock_thread_fn(void *data)
 	pthread_mutex_lock(&t->lock);
 	pthread_mutex_unlock(&t->started);
 
+	last_seq = 0;
 	c = &t->entries[0];
 	for (i = 0; i < CLOCK_ENTRIES; i++, c++) {
-		uint64_t seq, tsc;
+		uint32_t seq;
+		uint64_t tsc;
 
 		c->cpu = t->cpu;
 		do {
-			seq = atomic64_inc_return(t->seq);
+			seq = atomic32_inc_return(t->seq);
+			if (seq < last_seq)
+				break;
 			tsc = get_cpu_clock();
 		} while (seq != *t->seq);
 
@@ -492,12 +497,13 @@ static void *clock_thread_fn(void *data)
 		c->tsc = tsc;
 	}
 
-	log_info("cs: cpu%3d: %lu clocks seen\n", t->cpu, t->entries[CLOCK_ENTRIES - 1].tsc - t->entries[0].tsc);
+	log_info("cs: cpu%3d: %lu clocks seen\n", t->cpu, t->entries[i - 1].tsc - t->entries[0].tsc);
+
 	/*
 	 * The most common platform clock breakage is returning zero
	 * indefinitely. Check for that and return failure.
	 */
-	if (!t->entries[CLOCK_ENTRIES - 1].tsc && !t->entries[0].tsc)
+	if (!t->entries[i - 1].tsc && !t->entries[0].tsc)
 		return (void *) 1;
 
 	return NULL;
@@ -521,7 +527,7 @@ int fio_monotonic_clocktest(void)
 	struct clock_entry *entries;
 	unsigned long tentries, failed;
 	struct clock_entry *prev, *this;
-	uint64_t seq = 0;
+	uint32_t seq = 0;
 	int i;
 
 	log_info("cs: reliable_tsc: %s\n", tsc_reliable ? "yes" : "no");