#ifndef ARCH_X86_64_h
#define ARCH_X86_64_h

#define ARCH (arch_x86_64)
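/*
 * Fallback syscall numbers for x86-64, in case the installed kernel
 * or libc headers are too old to define them.
 */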
#ifndef __NR_ioprio_set
#define __NR_ioprio_set 251
#define __NR_ioprio_get 252
#endif

#ifndef __NR_fadvise64
#define __NR_fadvise64 221
#endif

#ifndef __NR_sys_splice
#define __NR_sys_splice 275
#define __NR_sys_tee 276
#define __NR_sys_vmsplice 278
#endif

#ifndef __NR_async_exec
#define __NR_async_exec 286
#define __NR_async_wait 287
#define __NR_umem_add 288
#define __NR_async_thread 289
#endif
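/* x86-64 huge pages are 2 MiB (2097152 bytes) */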
#define FIO_HUGE_PAGE 2097152
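/* Syslet (async syscall) support is available on this architecture */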
#define FIO_HAVE_SYSLET
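/*
 * "rep;nop" is the PAUSE instruction, a spin-wait hint to the CPU.
 * lfence and sfence order loads and stores, respectively.
 */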
#define nop __asm__ __volatile__("rep;nop": : :"memory")
#define read_barrier() __asm__ __volatile__("lfence":::"memory")
#define write_barrier() __asm__ __volatile__("sfence":::"memory")
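/*
 * Find the first zero bit: invert the mask and let "bsf" return the
 * index of the lowest set bit. The result is undefined if no bit is
 * zero (i.e. the mask is all ones).
 */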
static inline unsigned int arch_ffz(unsigned int bitmask)
{
	__asm__("bsfl %1,%0" :"=r" (bitmask) :"r" (~bitmask));
	return bitmask;
}
#define ARCH_HAVE_FFZ
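/*
 * Ticket spinlock: the low byte of ->lock holds the ticket currently
 * being served, the high byte holds the next ticket to hand out.
 */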
typedef struct {
	unsigned int lock;
} spinlock_t;
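/*
 * Atomically grab the next ticket with a locked xadd, then spin (with
 * a PAUSE hint) until the owner byte matches our ticket.
 */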
static inline void spin_lock(spinlock_t *lock)
{
	short inc = 0x0100;

	__asm__ __volatile__("lock ; xaddw %w0, %1\n"
			     "1:\t"
			     "cmpb %h0, %b0\n\t"
			     "je 2f\n\t"
			     "rep ; nop\n\t"
			     "movb %1, %b0\n\t"
			     "jmp 1b\n"
			     "2:"
			     : "+Q" (inc), "+m" (lock->lock)
			     :
			     : "memory", "cc");
}
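/*
 * Release the lock by bumping the owner byte, handing the lock to the
 * next ticket holder. A plain increment suffices on x86, since stores
 * are not reordered with earlier stores.
 */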
static inline void spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__("incb %0"
			     : "+m" (lock->lock)
			     :
			     : "memory", "cc");
}
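/* Static initializer for an unlocked spinlock */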
#define __SPIN_LOCK_UNLOCKED { 0 }

#endif /* ARCH_X86_64_h */