author     Taylor <contact@taylor.fish>    2020-03-08 16:05:59 -0700
committer  Taylor <contact@taylor.fish>    2020-03-08 16:05:59 -0700
commit     b2c67e993f4c2cb782a0db46b67e9b00800ba674 (patch)
tree       a377e5ef68d3a139269ceec77bda4dfdf533b8e0 /src/include/liburing/barrier.h
parent     f3424944af6e6148be39a68d451c5bf16b0f6b77 (diff)
download   liburing-b2c67e993f4c2cb782a0db46b67e9b00800ba674.tar.gz
           liburing-b2c67e993f4c2cb782a0db46b67e9b00800ba674.tar.bz2
Remove unused code in barrier.h
Diffstat (limited to 'src/include/liburing/barrier.h')
-rw-r--r--  src/include/liburing/barrier.h  52
1 file changed, 2 insertions(+), 50 deletions(-)
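For reference, the two helpers this commit keeps form an acquire/release pair: io_uring_smp_store_release() publishes a value so that every write made before it becomes visible to a thread that observes that value through io_uring_smp_load_acquire(). The sketch below shows that pairing using the same GCC/Clang __atomic builtins the generic fallback retains; it is a standalone illustration, not code from this commit (the threads, variable names, and payload are invented):

/* Single-producer handoff mirroring the semantics of
 * io_uring_smp_store_release()/io_uring_smp_load_acquire().
 * Build with: cc -pthread example.c */
#include <pthread.h>
#include <stdio.h>

static int payload;    /* data published by the writer */
static unsigned ready; /* written with release, read with acquire */

static void *writer(void *arg)
{
        (void)arg;
        payload = 42;                                  /* plain store */
        __atomic_store_n(&ready, 1, __ATOMIC_RELEASE); /* publish */
        return NULL;
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, NULL, writer, NULL);
        /* The acquire load pairs with the release store: once ready
         * reads as 1, the earlier store to payload must be visible. */
        while (!__atomic_load_n(&ready, __ATOMIC_ACQUIRE))
                ;
        printf("payload = %d\n", payload);
        pthread_join(t, NULL);
        return 0;
}

On strongly ordered targets such as x86 these builtins compile to plain moves; on weakly ordered targets they emit whatever fence is required, which is why the generic path needs no hand-written per-arch barriers.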
diff --git a/src/include/liburing/barrier.h b/src/include/liburing/barrier.h
index c0070bc..ad69506 100644
--- a/src/include/liburing/barrier.h
+++ b/src/include/liburing/barrier.h
@@ -34,19 +34,6 @@ after the acquire operation executes. This is implemented using
#if defined(__x86_64__) || defined(__i386__)
/* Adapted from arch/x86/include/asm/barrier.h */
-#define io_uring_mb() asm volatile("mfence" ::: "memory")
-#define io_uring_rmb() asm volatile("lfence" ::: "memory")
-#define io_uring_wmb() asm volatile("sfence" ::: "memory")
-#define io_uring_smp_rmb() io_uring_barrier()
-#define io_uring_smp_wmb() io_uring_barrier()
-#if defined(__i386__)
-#define io_uring_smp_mb() asm volatile("lock; addl $0,0(%%esp)" \
-                                       ::: "memory", "cc")
-#else
-#define io_uring_smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" \
-                                       ::: "memory", "cc")
-#endif
-
#define io_uring_smp_store_release(p, v) \
do { \
        io_uring_barrier(); \
@@ -60,49 +47,14 @@ do { \
        ___p1; \
})
-#elif defined(__aarch64__)
-/* Adapted from arch/arm64/include/asm/barrier.h */
-#define io_uring_dmb(opt) asm volatile("dmb " #opt : : : "memory")
-#define io_uring_dsb(opt) asm volatile("dsb " #opt : : : "memory")
-
-#define io_uring_mb() io_uring_dsb(sy)
-#define io_uring_rmb() io_uring_dsb(ld)
-#define io_uring_wmb() io_uring_dsb(st)
-#define io_uring_smp_mb() io_uring_dmb(ish)
-#define io_uring_smp_rmb() io_uring_dmb(ishld)
-#define io_uring_smp_wmb() io_uring_dmb(ishst)
-
-#else /* defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) */
+#else /* defined(__x86_64__) || defined(__i386__) */
/*
- * Add arch appropriate definitions. Be safe and use full barriers for
+ * Add arch appropriate definitions. Use built-in atomic operations for
* archs we don't have support for.
*/
-#define io_uring_smp_mb() __sync_synchronize()
-#define io_uring_smp_rmb() __sync_synchronize()
-#define io_uring_smp_wmb() __sync_synchronize()
-
#define io_uring_smp_store_release(p, v) \
        __atomic_store_n(p, v, __ATOMIC_RELEASE)
#define io_uring_smp_load_acquire(p) __atomic_load_n(p, __ATOMIC_ACQUIRE)
#endif /* defined(__x86_64__) || defined(__i386__) */
-/* From tools/include/asm/barrier.h */
-
-#ifndef io_uring_smp_store_release
-#define io_uring_smp_store_release(p, v) \
-do { \
-       io_uring_smp_mb(); \
-       IO_URING_WRITE_ONCE(*p, v); \
-} while (0)
-#endif
-
-#ifndef io_uring_smp_load_acquire
-#define io_uring_smp_load_acquire(p) \
-({ \
-       __typeof(*p) ___p1 = IO_URING_READ_ONCE(*p); \
-       io_uring_smp_mb(); \
-       ___p1; \
-})
-#endif
-
#endif /* defined(LIBURING_BARRIER_H) */
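A note on the surviving x86/i386 branch: it needs no fence instructions because the x86 TSO memory model already keeps loads ordered after earlier loads and stores ordered after earlier stores, which is all that acquire and release semantics demand; only compiler reordering must be suppressed. io_uring_barrier(), IO_URING_READ_ONCE(), and IO_URING_WRITE_ONCE() are defined elsewhere in this header, so the definitions below are assumed reconstructions for illustration, not quotes from the file:

/* Illustration only: assumed definitions, not verbatim from barrier.h. */

/* Compiler-only fence: emits no instruction, merely forbids the
 * compiler from moving memory accesses across it. */
#define io_uring_barrier()      __asm__ __volatile__("" ::: "memory")

/* Volatile accesses: force the compiler to issue exactly one real
 * load or store instead of caching or combining them. */
#define IO_URING_WRITE_ONCE(var, val) \
        (*(volatile __typeof(var) *)&(var) = (val))
#define IO_URING_READ_ONCE(var) \
        (*(volatile __typeof(var) *)&(var))

The one reordering x86 does allow, a store drifting past a later load, is not one that a release store paired with an acquire load has to forbid, so a compiler barrier around a volatile access suffices here.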