Add spinlocks
author	Jens Axboe <jens.axboe@oracle.com>
	Thu, 12 Jun 2008 09:26:26 +0000 (11:26 +0200)
committer	Jens Axboe <jens.axboe@oracle.com>
	Thu, 12 Jun 2008 09:26:26 +0000 (11:26 +0200)
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
arch/arch-alpha.h
arch/arch-ia64.h
arch/arch-ppc.h
arch/arch-s390.h
arch/arch-sparc.h
arch/arch-sparc64.h
arch/arch-x86.h
arch/arch-x86_64.h
arch/arch.h

index ef14437..a5e3801 100644 (file)
 #define read_barrier()         __asm__ __volatile__("mb": : :"memory")
 #define writer_barrier()       __asm__ __volatile__("wmb": : :"memory")
 
+/* Busy-wait spinlock for Alpha; zero means unlocked. */
+typedef struct {
+       volatile unsigned int lock;
+} spinlock_t;
+
+/*
+ * Acquire via load-locked/store-conditional: ldl_l reads the lock word
+ * under a reservation; if it is non-zero we spin with plain ldl in the
+ * out-of-line section (label 2) until it clears, then retry.  stl_c
+ * writes 1 only if the reservation survived — on failure (%0 == 0) the
+ * beq also falls into the spin path.  The trailing "mb" keeps the
+ * critical section from being reordered before the acquisition.
+ */
+static inline void spin_lock(spinlock_t *lock)
+{
+       long tmp;
+
+       __asm__ __volatile__("1:     ldl_l   %0,%1\n"
+                       "       bne     %0,2f\n"
+                       "       lda     %0,1\n"
+                       "       stl_c   %0,%1\n"
+                       "       beq     %0,2f\n"
+                       "       mb\n"
+                       ".subsection 2\n"
+                       "2:     ldl     %0,%1\n"
+                       "       bne     %0,2b\n"
+                       "       br      1b\n"
+                       ".previous"
+                       : "=&r" (tmp), "=m" (lock->lock)
+                       : "m"(lock->lock) : "memory");
+}
+
+/*
+ * Release: barrier, then a plain store of 0.  read_barrier() expands to
+ * "mb" (see above in this header), which is a full barrier on Alpha, so
+ * critical-section stores complete before the lock is seen as free.
+ */
+static inline void spin_unlock(spinlock_t *lock)
+{
+       read_barrier();
+       lock->lock = 0;
+}
+
+/* Static initializer for an unlocked spinlock_t. */
+#define __SPIN_LOCK_UNLOCKED   { 0 }
+
 #endif
index 2f92684..d9afc32 100644 (file)
@@ -35,4 +35,38 @@ static inline unsigned long arch_ffz(unsigned long bitmask)
 }
 #define ARCH_HAVE_FFZ
 
+/* Busy-wait spinlock for ia64; zero means unlocked. */
+typedef struct {
+       volatile unsigned int lock;
+} spinlock_t;
+
+/* Registers trashed by the contention helper called from spin_lock(). */
+#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
+
+/*
+ * Fast path: cmpxchg4.acq swaps 1 into the lock word if it currently
+ * holds 0 (ar.ccv), with acquire semantics.  On contention (p14 set)
+ * control branches to an out-of-line contention routine via b6.
+ *
+ * NOTE(review): ia64_spinlock_contention_pre3_4 is a Linux-kernel
+ * internal symbol — confirm an equivalent is provided in this userspace
+ * build, otherwise the contended path will fail to link.
+ */
+static inline void spin_lock(spinlock_t *lock)
+{
+       register volatile unsigned int *ptr asm ("r31") = &lock->lock;
+       unsigned long flags = 0;
+
+       __asm__ __volatile__("{\n\t"
+                       "  mov ar.ccv = r0\n\t"
+                       "  mov r28 = ip\n\t"
+                       "  mov r30 = 1;;\n\t"
+                       "}\n\t"
+                       "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
+                       "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
+                       "cmp4.ne p14, p0 = r30, r0\n\t"
+                       "mov b6 = r29;;\n\t"
+                       "mov r27=%2\n\t"
+                       "(p14) br.cond.spnt.many b6"
+                       : "=r"(ptr) : "r"(ptr), "r" (flags)
+                       : IA64_SPINLOCK_CLOBBERS);
+}
+
+/*
+ * Release: st4.rel already carries release ordering; the preceding
+ * read_barrier() mirrors the other arch headers in this patch.
+ */
+static inline void spin_unlock(spinlock_t *lock)
+{
+       read_barrier();
+       __asm__ __volatile__("st4.rel.nta [%0] = r0\n\t" :: "r" (lock));
+}
+
+/* Static initializer for an unlocked spinlock_t. */
+#define __SPIN_LOCK_UNLOCKED   { 0 }
+
 #endif
index 0611538..4839122 100644 (file)
@@ -44,4 +44,38 @@ static inline int arch_ffz(unsigned long bitmask)
 }
 #define ARCH_HAVE_FFZ
 
+/* Busy-wait spinlock for PowerPC; zero means unlocked. */
+typedef struct {
+       volatile unsigned int lock;
+} spinlock_t;
+
+/*
+ * Single acquisition attempt using lwarx/stwcx. (load-and-reserve /
+ * store-conditional).  Returns 0 on success — the lock word was observed
+ * as 0 and our conditional store of 1 committed — and non-zero (the held
+ * lock value) on failure.  isync orders the critical section after a
+ * successful acquire.
+ *
+ * Fixes vs. the original hunk: the function was declared void despite
+ * ending in "return tmp" (and being used as a loop condition below); the
+ * input operand list lacked its "r" constraint strings even though the
+ * template references %2; and stwcx. named literal register 1 instead of
+ * operand %1 (the value 1 to store).
+ */
+static inline int spin_trylock(spinlock_t *lock)
+{
+       unsigned long tmp;
+
+       __asm__ __volatile__(
+               "1:     lwarx           %0,0,%2\n\
+               cmpwi           0,%0,0\n\
+               bne-            2f\n\
+               stwcx.          %1,0,%2\n\
+               bne-            1b\n\
+               isync\n\
+               2:"     : "=&r" (tmp)
+               : "r" (1), "r" (&lock->lock)
+               : "cr0", "memory");
+
+       return tmp;
+}
+
+/*
+ * Acquire: spin calling spin_trylock() until it reports success (zero).
+ * NOTE(review): spin_trylock() is declared void in this hunk yet ends
+ * with "return tmp" — its return type must be int for this condition to
+ * compile; confirm the declaration is corrected.
+ */
+static inline void spin_lock(spinlock_t *lock)
+{
+       while (spin_trylock(lock))
+               ;
+}
+
+/*
+ * Release: barrier then a plain store of 0.
+ * NOTE(review): PowerPC releases usually use a store-side barrier
+ * (lwsync/sync) — confirm read_barrier() maps to a strong enough
+ * instruction on this arch.
+ */
+static inline void spin_unlock(spinlock_t *lock)
+{
+       read_barrier();
+       lock->lock = 0;
+}
+
 #endif
index 0647750..8c6fa5e 100644 (file)
 #define read_barrier() asm volatile("bcr 15,0" : : : "memory")
 #define write_barrier()        asm volatile("bcr 15,0" : : : "memory")
 
+/* Busy-wait spinlock for s390; zero means unlocked. */
+typedef struct {
+       volatile unsigned int lock;
+} spinlock_t;
+
+/*
+ * Compare-and-swap on the lock word via the serializing "cs"
+ * instruction: if *lock == old, store new.  Returns the value *lock
+ * held before the operation, so a return equal to the passed-in "old"
+ * (0 in the callers below) means the swap succeeded.
+ */
+static inline int
+_raw_compare_and_swap(volatile unsigned int *lock,
+                     unsigned int old, unsigned int new)
+{
+       __asm__ __volatile__(
+               "       cs      %0,%3,0(%4)"
+               : "=d" (old), "=m" (*lock)
+               : "0" (old), "d" (new), "a" (lock), "m" (*lock)
+               : "cc", "memory" );
+
+       return old;
+}
+
+/*
+ * Acquire: one CAS attempt (0 -> 0x80000000); on contention, spin
+ * reading the word until it looks free, then retry the CAS.  The plain
+ * read in the loop keeps the cache line shared while waiting.
+ */
+static inline void spin_lock(spinlock_t *lock)
+{
+       if (!_raw_compare_and_swap(&lock->lock, 0, 0x80000000))
+               return;
+
+       while (1) {
+               if (lock->lock)
+                       continue;
+               if (!_raw_compare_and_swap(&lock->lock, 0, 0x80000000))
+                       break;
+       }
+}
+
+/*
+ * Release: CAS the held value back to 0; "cs" serializes, so no extra
+ * barrier is needed.  NOTE(review): unlike the alpha/ia64/x86 hunks,
+ * no __SPIN_LOCK_UNLOCKED is defined here — confirm the initializer
+ * macro is not needed on this arch.
+ */
+static inline void spin_unlock(spinlock_t *lock)
+{
+        _raw_compare_and_swap(&lock->lock, 0x80000000, 0);
+}
+
 #endif
index cd552ab..de675da 100644 (file)
 #define read_barrier() __asm__ __volatile__ ("" : : : "memory")
 #define write_barrier()        __asm__ __volatile__ ("" : : : "memory")
 
+/* Busy-wait spinlock for sparc32; a single byte, zero means unlocked. */
+typedef struct {
+       volatile unsigned char lock;
+} spinlock_t;
+
+/*
+ * Acquire with ldstub (atomic load + store 0xff): g2 receives the old
+ * byte.  If it was non-zero the lock is held, so spin in the
+ * out-of-line section (label 2) re-reading with plain ldub until it
+ * clears, then retry the ldstub.  The annulled branch-delay ldub
+ * preloads the byte for the spin loop.
+ */
+static inline void spin_lock(spinlock_t *lock)
+{
+       __asm__ __volatile__(
+               "\n1:\n\t"
+               "ldstub [%0], %%g2\n\t"
+               "orcc   %%g2, 0x0, %%g0\n\t"
+               "bne,a  2f\n\t"
+               " ldub  [%0], %%g2\n\t"
+               ".subsection    2\n"
+               "2:\n\t"
+               "orcc   %%g2, 0x0, %%g0\n\t"
+               "bne,a  2b\n\t"
+               " ldub  [%0], %%g2\n\t"
+               "b,a    1b\n\t"
+               ".previous\n"
+               : /* no outputs */
+               : "r" (lock)
+               : "g2", "memory", "cc");
+}
+
+/*
+ * Release: plain store of zero.  NOTE(review): no explicit barrier here,
+ * unlike the sparc64 variant — presumably relying on sparc32 ordering;
+ * confirm for the memory models this build targets.
+ */
+static inline void spin_unlock(spinlock_t *lock)
+{
+       __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
+}
+
 #endif
index 332cf91..6ee8659 100644 (file)
 #define read_barrier()         membar_safe("#LoadLoad")
 #define write_barrier()                membar_safe("#StoreStore")
 
+/* Busy-wait spinlock for sparc64; a single byte, zero means unlocked. */
+typedef struct {
+       volatile unsigned char lock;
+} spinlock_t;
+
+/*
+ * Acquire with ldstub (atomic load + store 0xff) followed by a
+ * #StoreLoad | #StoreStore membar for acquire ordering.  If the old
+ * byte was non-zero, spin in the out-of-line section (label 2) with
+ * plain ldub until it clears, then retry the ldstub.
+ */
+static inline void spin_lock(spinlock_t *lock)
+{
+       unsigned long tmp;
+
+       __asm__ __volatile__(
+               "1:     ldstub          [%1], %0\n"
+               "       membar          #StoreLoad | #StoreStore\n"
+               "       brnz,pn         %0, 2f\n"
+               "        nop\n"
+               "       .subsection     2\n"
+               "2:     ldub            [%1], %0\n"
+               "       membar          #LoadLoad\n"
+               "       brnz,pt         %0, 2b\n"
+               "        nop\n"
+               "       ba,a,pt         %%xcc, 1b\n"
+               "       .previous"
+               : "=&r" (tmp)
+               : "r" (lock)
+               : "memory");
+}
+
+/*
+ * Release: membar ensures critical-section stores and loads complete
+ * before the zero byte makes the lock visible as free.
+ */
+static inline void spin_unlock(spinlock_t *lock)
+{
+       __asm__ __volatile__(
+               "       membar          #StoreStore | #LoadStore\n"
+               "       stb             %%g0, [%0]"
+               : /* No outputs */
+               : "r" (lock)
+               : "memory");
+}
+
 #endif
index 97262ff..b9249ea 100644 (file)
@@ -40,4 +40,35 @@ static inline unsigned long arch_ffz(unsigned long bitmask)
 }
 #define ARCH_HAVE_FFZ
 
+/* Ticket spinlock for 32-bit x86; low byte = owner, high byte = next ticket. */
+typedef struct {
+       unsigned int lock;
+} spinlock_t;
+
+/*
+ * Acquire: xaddw atomically adds 0x0100, taking the next ticket into
+ * %h0 (the high byte of inc) while fetching the current owner into %b0.
+ * Spin — with "rep; nop" (pause) to be polite to the sibling thread —
+ * reloading the owner byte until it equals our ticket (FIFO fairness).
+ */
+static inline void spin_lock(spinlock_t *lock)
+{
+       short inc = 0x0100;
+
+       __asm__ __volatile__("xaddw %w0, %1\n"
+                       "1:\t"
+                       "cmpb %h0, %b0\n\t"
+                       "je 2f\n\t"
+                       "rep ; nop\n\t"
+                       "movb %1, %b0\n\t"
+                       "jmp 1b\n"
+                       "2:"
+                       : "+Q" (inc), "+m" (lock->lock)
+                       :
+                       : "memory", "cc");
+}
+
+/*
+ * Release: bump the owner byte so the next ticket holder may proceed.
+ * Only the lock holder writes this byte, so incb needs no lock prefix.
+ */
+static inline void spin_unlock(spinlock_t *lock)
+{
+       __asm__ __volatile__("incb %0"
+                       : "+m" (lock->lock)
+                       :
+                       : "memory", "cc");
+}
+
+/* Static initializer for an unlocked spinlock_t. */
+#define __SPIN_LOCK_UNLOCKED   { 0 }
+
 #endif
index 216e74e..71ba224 100644 (file)
@@ -40,4 +40,35 @@ static inline unsigned int arch_ffz(unsigned int bitmask)
 }
 #define ARCH_HAVE_FFZ
 
+/* Ticket spinlock for x86-64; low byte = owner, high byte = next ticket. */
+typedef struct {
+       unsigned int lock;
+} spinlock_t;
+
+/*
+ * Acquire: identical to the 32-bit x86 variant — xaddw atomically takes
+ * the next ticket into %h0 while fetching the current owner into %b0,
+ * then spins (with "rep; nop" / pause) re-reading the owner byte until
+ * it matches our ticket.
+ */
+static inline void spin_lock(spinlock_t *lock)
+{
+       short inc = 0x0100;
+
+       __asm__ __volatile__("xaddw %w0, %1\n"
+                       "1:\t"
+                       "cmpb %h0, %b0\n\t"
+                       "je 2f\n\t"
+                       "rep ; nop\n\t"
+                       "movb %1, %b0\n\t"
+                       "jmp 1b\n"
+                       "2:"
+                       : "+Q" (inc), "+m" (lock->lock)
+                       :
+                       : "memory", "cc");
+}
+
+/*
+ * Release: bump the owner byte so the next ticket holder may proceed.
+ * Only the lock holder writes this byte, so incb needs no lock prefix.
+ */
+static inline void spin_unlock(spinlock_t *lock)
+{
+       __asm__ __volatile__("incb %0"
+                       : "+m" (lock->lock)
+                       :
+                       : "memory", "cc");
+}
+
+/* Static initializer for an unlocked spinlock_t. */
+#define __SPIN_LOCK_UNLOCKED   { 0 }
+
 #endif
index f4eb855..0016357 100644 (file)
@@ -38,4 +38,9 @@ enum {
 #include "../lib/ffz.h"
 #endif
 
+/*
+ * Runtime initialization to the unlocked state; every arch's
+ * spinlock_t in this patch treats a zero lock word/byte as free.
+ */
+static inline void spin_lock_init(spinlock_t *lock)
+{
+       lock->lock = 0;
+}
+
 #endif