author     Taylor <contact@taylor.fish>  2020-03-07 19:46:37 -0800
committer  Taylor <contact@taylor.fish>  2020-03-08 14:47:42 -0700
commit     6f35a622743f4f2fd6b1d06b104763f5e587977d (patch)
tree       faffae2210e9f1f60c7a29b8fc86419f4c9f6ea1 /src/include/liburing/barrier.h
parent     4c7ae1de848dc852bcc5853ab0a012102764fa93 (diff)
download   liburing-6f35a622743f4f2fd6b1d06b104763f5e587977d.tar.gz
           liburing-6f35a622743f4f2fd6b1d06b104763f5e587977d.tar.bz2
Use __atomic builtins in barrier.h
io_uring_smp_store_release() and io_uring_smp_load_acquire() currently issue full sequentially consistent memory barriers (as opposed to release and acquire barriers, respectively) on every architecture except x86(-64) and 64-bit ARM. For architectures that aren't handled explicitly, we can instead use GCC's __atomic builtins, which were introduced in GCC 4.7.
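As a usage sketch (not part of this commit), the acquire/release pair is what a single-consumer loop over a kernel-filled ring relies on: the acquire load of the producer index makes entries written before the producer's release store visible, and the release store of the consumer index publishes the consumption. Only io_uring_smp_load_acquire() and io_uring_smp_store_release() below come from barrier.h; struct ring and ring_pop() are hypothetical names for illustration.

/*
 * Hedged sketch, not from the commit: a hypothetical consumer loop
 * showing how the two macros pair up.
 */
#include <liburing/barrier.h>

struct ring {
	unsigned *khead;	/* consumer index, advanced by us */
	unsigned *ktail;	/* producer index, advanced by the kernel */
	unsigned mask;		/* ring size - 1 (power of two) */
	unsigned *entries;
};

static int ring_pop(struct ring *r, unsigned *out)
{
	unsigned head = *r->khead;
	/* Acquire pairs with the producer's release store of the tail, so
	 * entries written before that store are visible before we read them. */
	unsigned tail = io_uring_smp_load_acquire(r->ktail);

	if (head == tail)
		return 0;		/* ring is empty */

	*out = r->entries[head & r->mask];
	/* Release publishes our read of the slot before handing it back. */
	io_uring_smp_store_release(r->khead, head + 1);
	return 1;
}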
Diffstat (limited to 'src/include/liburing/barrier.h')
-rw-r--r--  src/include/liburing/barrier.h  4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/src/include/liburing/barrier.h b/src/include/liburing/barrier.h
index 3df70ea..c0070bc 100644
--- a/src/include/liburing/barrier.h
+++ b/src/include/liburing/barrier.h
@@ -80,6 +80,10 @@ do { \
#define io_uring_smp_mb() __sync_synchronize()
#define io_uring_smp_rmb() __sync_synchronize()
#define io_uring_smp_wmb() __sync_synchronize()
+
+#define io_uring_smp_store_release(p, v) \
+ __atomic_store_n(p, v, __ATOMIC_RELEASE)
+#define io_uring_smp_load_acquire(p) __atomic_load_n(p, __ATOMIC_ACQUIRE)
#endif /* defined(__x86_64__) || defined(__i386__) */
/* From tools/include/asm/barrier.h */
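For comparison, a minimal sketch of the two fallback strategies the commit message contrasts; the store_release_via_*() names are illustrative only, not liburing API. The old generic path wraps a plain store in a sequentially consistent fence, while the __atomic builtin expresses exactly a release store and lets GCC emit a cheaper sequence on weakly ordered architectures.

/* Hedged sketch; helper names are hypothetical. */
static void store_release_via_full_barrier(unsigned *p, unsigned v)
{
	__sync_synchronize();		/* full (sequentially consistent) barrier */
	*(volatile unsigned *)p = v;	/* plain store */
}

static void store_release_via_atomic(unsigned *p, unsigned v)
{
	/* Exactly the semantics io_uring_smp_store_release() needs. */
	__atomic_store_n(p, v, __ATOMIC_RELEASE);
}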