arch,lib/seqlock: implement seqlock with C++ atomic if compiled with C++
[fio.git] / arch / arch.h
index 3ee5ac43e563eb0b5e3c72576e18d8729139ce3c..fca003beabf4e606aa1b80af853f973009e8c7ed 100644 (file)
@@ -1,15 +1,17 @@
 #ifndef ARCH_H
 #define ARCH_H
 
-#ifdef __WORDSIZE
-#define BITS_PER_LONG  __WORDSIZE
+#ifdef __cplusplus
+#include <atomic>
 #else
-#define BITS_PER_LONG  32
+#include <stdatomic.h>
 #endif
 
+#include "../lib/types.h"
+
 enum {
        arch_x86_64 = 1,
-       arch_i386,
+       arch_x86,
        arch_ppc,
        arch_ia64,
        arch_s390,
@@ -20,6 +22,7 @@ enum {
        arch_sh,
        arch_hppa,
        arch_mips,
+       arch_aarch64,
 
        arch_generic,
 
@@ -33,6 +36,41 @@ enum {
        ARCH_FLAG_4     = 1 << 3,
 };
 
+extern unsigned long arch_flags;
+
+#define ARCH_CPU_CLOCK_WRAPS
+
+#ifdef __cplusplus
+#define atomic_add(p, v)                                       \
+       std::atomic_fetch_add(p, (v))
+#define atomic_sub(p, v)                                       \
+       std::atomic_fetch_sub(p, (v))
+#define atomic_load_relaxed(p)                                 \
+       std::atomic_load_explicit(p,                            \
+                            std::memory_order_relaxed)
+#define atomic_load_acquire(p)                                 \
+       std::atomic_load_explicit(p,                            \
+                            std::memory_order_acquire)
+#define atomic_store_release(p, v)                             \
+       std::atomic_store_explicit(p, (v),                      \
+                            std::memory_order_release)
+#else
+#define atomic_add(p, v)                                       \
+       atomic_fetch_add((_Atomic typeof(*(p)) *)(p), v)
+#define atomic_sub(p, v)                                       \
+       atomic_fetch_sub((_Atomic typeof(*(p)) *)(p), v)
+#define atomic_load_relaxed(p)                                 \
+       atomic_load_explicit((_Atomic typeof(*(p)) *)(p),       \
+                            memory_order_relaxed)
+#define atomic_load_acquire(p)                                 \
+       atomic_load_explicit((_Atomic typeof(*(p)) *)(p),       \
+                            memory_order_acquire)
+#define atomic_store_release(p, v)                             \
+       atomic_store_explicit((_Atomic typeof(*(p)) *)(p), (v), \
+                             memory_order_release)
+#endif
+
+/* IWYU pragma: begin_exports */
 #if defined(__i386__)
 #include "arch-x86.h"
 #elif defined(__x86_64__)
@@ -57,16 +95,15 @@ enum {
 #include "arch-sh.h"
 #elif defined(__hppa__)
 #include "arch-hppa.h"
+#elif defined(__aarch64__)
+#include "arch-aarch64.h"
 #else
 #warning "Unknown architecture, attempting to use generic model."
 #include "arch-generic.h"
 #endif
 
-#ifdef ARCH_HAVE_FFZ
-#define ffz(bitmask)   arch_ffz(bitmask)
-#else
 #include "../lib/ffz.h"
-#endif
+/* IWYU pragma: end_exports */
 
 #ifndef ARCH_HAVE_INIT
 static inline int arch_init(char *envp[])
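
With the hunk above, ffz() ("find first zero bit") is now pulled in
unconditionally from ../lib/ffz.h instead of being aliased to arch_ffz()
here when ARCH_HAVE_FFZ is set; that header is not shown in this diff,
so presumably the arch dispatch moved into it. For reference, the
expected semantics as a generic sketch (0-indexed position of the lowest
clear bit; the helper name is illustrative, not fio's):

    /*
     * Illustrative generic fallback: ffz(0x0) == 0, ffz(0x1) == 1,
     * ffz(0x3) == 2. The result is meaningless for an all-ones word,
     * since no zero bit exists.
     */
    static inline int generic_ffz(unsigned long word)
    {
            unsigned int i;

            for (i = 0; i < sizeof(word) * 8; i++)
                    if (!(word & (1UL << i)))
                            return (int)i;

            return -1;
    }
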
@@ -75,4 +112,32 @@ static inline int arch_init(char *envp[])
 }
 #endif
 
+#ifdef __alpha__
+/*
+ * alpha is the only exception; all other architectures
+ * share common numbers for new system calls.
+ */
+# ifndef __NR_io_uring_setup
+#  define __NR_io_uring_setup          535
+# endif
+# ifndef __NR_io_uring_enter
+#  define __NR_io_uring_enter          536
+# endif
+# ifndef __NR_io_uring_register
+#  define __NR_io_uring_register       537
+# endif
+#else /* !__alpha__ */
+# ifndef __NR_io_uring_setup
+#  define __NR_io_uring_setup          425
+# endif
+# ifndef __NR_io_uring_enter
+#  define __NR_io_uring_enter          426
+# endif
+# ifndef __NR_io_uring_register
+#  define __NR_io_uring_register       427
+# endif
+#endif
+
+#define ARCH_HAVE_IOURING
+
 #endif
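
The trailing block hard-codes the io_uring syscall numbers (425..427
everywhere except alpha, whose syscall table historically uses its own
numbering) so the defines are available even against older libc/kernel
headers, and ARCH_HAVE_IOURING advertises that to the rest of fio. As a
minimal sketch of what those numbers enable, one can probe for kernel
support by creating and closing a ring via the raw syscall; the helper
name here is illustrative, and fio's real engine (engines/io_uring.c)
does far more than this:

    #include <linux/io_uring.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Illustrative helper, not fio code: returns 0 if io_uring works. */
    static int probe_io_uring(void)
    {
            struct io_uring_params p;
            int fd;

            memset(&p, 0, sizeof(p));
            fd = syscall(__NR_io_uring_setup, 1, &p);  /* 1-entry ring */
            if (fd < 0)
                    return -1;      /* e.g. ENOSYS on pre-5.1 kernels */

            close(fd);
            return 0;
    }
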