Fix end-of-job slowdown for random IO with a random map
diff --git a/arch/arch-x86_64.h b/arch/arch-x86_64.h
index c8c3d18251caf721fe25bef5c85b6c765d675ee3..f2dcf497f63ec89ca88c8c3817de407c7a1d80c9 100644
--- a/arch/arch-x86_64.h
+++ b/arch/arch-x86_64.h
 #define read_barrier() __asm__ __volatile__("lfence":::"memory")
 #define write_barrier()        __asm__ __volatile__("sfence":::"memory")
 
-static inline unsigned int arch_ffz(unsigned int bitmask)
+static inline unsigned long arch_ffz(unsigned long bitmask)
 {
-       __asm__("bsfl %1,%0" :"=r" (bitmask) :"r" (~bitmask));
+       __asm__("bsf %1,%0" :"=r" (bitmask) :"r" (~bitmask));
        return bitmask;
 }
-#define ARCH_HAVE_FFZ
-
-typedef struct {
-       unsigned int lock;
-} spinlock_t;
 
-static inline void spin_lock(spinlock_t *lock)
+static inline unsigned long long get_cpu_clock(void)
 {
-       short inc = 0x0100;
+       unsigned int lo, hi;
 
-       __asm__ __volatile__("xaddw %w0, %1\n"
-                       "1:\t"
-                       "cmpb %h0, %b0\n\t"
-                       "je 2f\n\t"
-                       "rep ; nop\n\t"
-                       "movb %1, %b0\n\t"
-                       "jmp 1b\n"
-                       "2:"
-                       : "+Q" (inc), "+m" (lock->lock)
-                       :
-                       : "memory", "cc");
+       __asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
+       return ((unsigned long long) hi << 32ULL) | lo;
 }
 
-static inline void spin_unlock(spinlock_t *lock)
-{
-       __asm__ __volatile__("incb %0"
-                       : "+m" (lock->lock)
-                       :
-                       : "memory", "cc");
-}
+#define ARCH_HAVE_FFZ
+#define ARCH_HAVE_SSE4_2
+#define ARCH_HAVE_CPU_CLOCK
 
 #endif
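
The snippet below is a standalone sketch, not part of the commit: it copies the widened arch_ffz() and the new get_cpu_clock() out of the header so they can be compiled and exercised on their own with gcc on an x86-64 host (in fio itself they are pulled in via arch/arch.h). The main() driver and its printed values are illustrative only.

    #include <stdio.h>

    static inline unsigned long arch_ffz(unsigned long bitmask)
    {
            /* bsf on the complement yields the lowest clear bit of bitmask;
             * the result is undefined if bitmask is all ones, so callers
             * must guard against that case. */
            __asm__("bsf %1,%0" :"=r" (bitmask) :"r" (~bitmask));
            return bitmask;
    }

    static inline unsigned long long get_cpu_clock(void)
    {
            unsigned int lo, hi;

            /* rdtsc returns the 64-bit time stamp counter in edx:eax */
            __asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
            return ((unsigned long long) hi << 32ULL) | lo;
    }

    int main(void)
    {
            unsigned long long t1, t2;

            /* 0xff has bits 0-7 set, so the first zero bit is bit 8 */
            printf("ffz(0xff) = %lu\n", arch_ffz(0xff));

            t1 = get_cpu_clock();
            t2 = get_cpu_clock();
            printf("tsc delta = %llu cycles\n", t2 - t1);
            return 0;
    }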