author		Bart Van Assche <bvanassche@acm.org>	2020-06-21 14:24:36 -0700
committer	Bart Van Assche <bvanassche@acm.org>	2020-06-21 19:47:25 -0700
commit		5fa0c063c6fe405200ec97e8f3703446dd8d82bc (patch)
tree		67acd112885fcf11af52db8360d58e8ae15d5f9b /lib
parent		0a602473807bcdaf28a30441ca05b9e16b6ad128 (diff)
Optimize the seqlock implementation
Use atomic_load_acquire() and atomic_store_release() instead of barriers where appropriate.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
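For context, atomic_load_acquire() and atomic_store_release() are fio helper macros. A minimal sketch of what such helpers typically look like on top of the GCC/Clang __atomic builtins is shown below; this is an assumed shape, not the exact definition from fio's arch headers.

/* Sketch only: assumed definitions on top of the GCC/Clang __atomic
 * builtins. fio's real macros live in its arch headers and may differ. */
#define atomic_load_acquire(p)		\
	__atomic_load_n((p), __ATOMIC_ACQUIRE)
#define atomic_store_release(p, v)	\
	__atomic_store_n((p), (v), __ATOMIC_RELEASE)

An acquire load orders later reads after it and a release store orders earlier writes before it, which is exactly the one-way ordering the seqlock read and write sides need, so the full read_barrier()/write_barrier() fences can be dropped.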
Diffstat (limited to 'lib')
-rw-r--r--	lib/seqlock.h	9
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/lib/seqlock.h b/lib/seqlock.h
index 762b6ec1..afa9fd31 100644
--- a/lib/seqlock.h
+++ b/lib/seqlock.h
@@ -18,13 +18,12 @@ static inline unsigned int read_seqlock_begin(struct seqlock *s)
 	unsigned int seq;
 
 	do {
-		seq = s->sequence;
+		seq = atomic_load_acquire(&s->sequence);
 		if (!(seq & 1))
 			break;
 		nop;
 	} while (1);
 
-	read_barrier();
 	return seq;
 }
@@ -36,14 +35,12 @@ static inline bool read_seqlock_retry(struct seqlock *s, unsigned int seq)
 
 static inline void write_seqlock_begin(struct seqlock *s)
 {
-	s->sequence++;
-	write_barrier();
+	s->sequence = atomic_load_acquire(&s->sequence) + 1;
 }
 
 static inline void write_seqlock_end(struct seqlock *s)
 {
-	write_barrier();
-	s->sequence++;
+	atomic_store_release(&s->sequence, s->sequence + 1);
 }
 
 #endif
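For reference, a typical reader/writer pairing with this seqlock API looks like the sketch below. The struct stats type, its fields, and the include path are illustrative only; the seqlock functions themselves are the ones declared in lib/seqlock.h.

#include <stdint.h>
#include "lib/seqlock.h"	/* illustrative include path */

struct stats {			/* hypothetical payload guarded by the seqlock */
	struct seqlock lock;
	uint64_t bytes;
	uint64_t ios;
};

/* Writer: bump the sequence to odd, update the payload, bump it back to even. */
static void stats_update(struct stats *st, uint64_t bytes)
{
	write_seqlock_begin(&st->lock);
	st->bytes += bytes;
	st->ios++;
	write_seqlock_end(&st->lock);
}

/* Reader: retry until an even, unchanged sequence number is observed. */
static uint64_t stats_read_bytes(struct stats *st)
{
	unsigned int seq;
	uint64_t bytes;

	do {
		seq = read_seqlock_begin(&st->lock);
		bytes = st->bytes;
	} while (read_seqlock_retry(&st->lock, seq));

	return bytes;
}

Note that the write side assumes a single writer (or external serialization between writers), the usual seqlock discipline, since the sequence increment is not an atomic read-modify-write.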