Add spinlocks
author Jens Axboe <jens.axboe@oracle.com>
Thu, 12 Jun 2008 09:26:26 +0000 (11:26 +0200)
committer Jens Axboe <jens.axboe@oracle.com>
Thu, 12 Jun 2008 09:26:26 +0000 (11:26 +0200)
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
arch/arch-alpha.h
arch/arch-ia64.h
arch/arch-ppc.h
arch/arch-s390.h
arch/arch-sparc.h
arch/arch-sparc64.h
arch/arch-x86.h
arch/arch-x86_64.h
arch/arch.h

arch/arch-alpha.h
index ef14437c0e27fb9e14a7955073feaf5dfcf29814..a5e3801c0bec312dbea52dbc4edd6793387c5bc5 100644
 #define read_barrier()         __asm__ __volatile__("mb": : :"memory")
 #define write_barrier()        __asm__ __volatile__("wmb": : :"memory")
 
+typedef struct {
+       volatile unsigned int lock;
+} spinlock_t;
+
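+/*
+ * ldl_l/stl_c load-locked/store-conditional retry loop; the trailing mb
+ * keeps the critical section from starting before the lock is held.  The
+ * .subsection 2 path read-spins with a plain ldl until the lock looks
+ * free, then branches back to retry the locked sequence.
+ */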
+static inline void spin_lock(spinlock_t *lock)
+{
+       long tmp;
+
+       __asm__ __volatile__("1:     ldl_l   %0,%1\n"
+                       "       bne     %0,2f\n"
+                       "       lda     %0,1\n"
+                       "       stl_c   %0,%1\n"
+                       "       beq     %0,2f\n"
+                       "       mb\n"
+                       ".subsection 2\n"
+                       "2:     ldl     %0,%1\n"
+                       "       bne     %0,2b\n"
+                       "       br      1b\n"
+                       ".previous"
+                       : "=&r" (tmp), "=m" (lock->lock)
+                       : "m"(lock->lock) : "memory");
+}
+
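+/* read_barrier() is a full "mb" on alpha, so it also orders this release. */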
+static inline void spin_unlock(spinlock_t *lock)
+{
+       read_barrier();
+       lock->lock = 0;
+}
+
+#define __SPIN_LOCK_UNLOCKED   { 0 }
+
 #endif
arch/arch-ia64.h
index 2f926849fb04cc9e13d5e514c6ac6f255a32360e..d9afc32e702389ab0cc94fae80f696bf3a30d501 100644
@@ -35,4 +35,48 @@ static inline unsigned long arch_ffz(unsigned long bitmask)
 }
 #define ARCH_HAVE_FFZ
 
+typedef struct {
+       volatile unsigned int lock;
+} spinlock_t;
+
+#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
+
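+/*
+ * cmpxchg4.acq tries to swap 1 into the lock word with acquire semantics.
+ * On contention it branches to ia64_spinlock_contention_pre3_4, an external
+ * slow-path helper (r28 holds the return ip for it) that must be provided
+ * at link time; this mirrors older ia64 Linux kernel spin_lock() code.
+ */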
+static inline void spin_lock(spinlock_t *lock)
+{
+       register volatile unsigned int *ptr asm ("r31") = &lock->lock;
+       unsigned long flags = 0;
+
+       __asm__ __volatile__("{\n\t"
+                       "  mov ar.ccv = r0\n\t"
+                       "  mov r28 = ip\n\t"
+                       "  mov r30 = 1;;\n\t"
+                       "}\n\t"
+                       "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
+                       "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
+                       "cmp4.ne p14, p0 = r30, r0\n\t"
+                       "mov b6 = r29;;\n\t"
+                       "mov r27=%2\n\t"
+                       "(p14) br.cond.spnt.many b6"
+                       : "=r"(ptr) : "r"(ptr), "r" (flags)
+                       : IA64_SPINLOCK_CLOBBERS);
+}
+
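+/*
+ * st4.rel has release semantics: all prior accesses are ordered before
+ * the store that clears the lock.
+ */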
+static inline void spin_unlock(spinlock_t *lock)
+{
+       read_barrier();
+       __asm__ __volatile__("st4.rel.nta [%0] = r0\n\t" :: "r" (lock));
+}
+
+#define __SPIN_LOCK_UNLOCKED   { 0 }
+
 #endif
arch/arch-ppc.h
index 061153873131db5964ca650a6d255e72b8035866..4839122e9328688a486aaff57b83ddcbcfbbafa5 100644
@@ -44,4 +44,46 @@ static inline int arch_ffz(unsigned long bitmask)
 }
 #define ARCH_HAVE_FFZ
 
+typedef struct {
+       volatile unsigned int lock;
+} spinlock_t;
+
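+/*
+ * lwarx/stwcx. reservation pair: returns 0 if the lock was free and is now
+ * taken, non-zero if it was already held.  The isync keeps the critical
+ * section from starting before the lock is visibly owned.
+ */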
+static inline int spin_trylock(spinlock_t *lock)
+{
+       unsigned long tmp;
+
+       __asm__ __volatile__(
+               "1:     lwarx           %0,0,%2\n\
+               cmpwi           0,%0,0\n\
+               bne-            2f\n\
+               stwcx.          %1,0,%2\n\
+               bne-            1b\n\
+               isync\n\
+               2:"     : "=&r" (tmp)
+               : "r" (1), "r" (&lock->lock)
+               : "cr0", "memory");
+
+       return tmp;
+}
+
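+/* Spin until the trylock succeeds (returns 0). */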
+static inline void spin_lock(spinlock_t *lock)
+{
+       while (spin_trylock(lock))
+               ;
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+       read_barrier();
+       lock->lock = 0;
+}
+
+#define __SPIN_LOCK_UNLOCKED   { 0 }
+
 #endif
arch/arch-s390.h
index 06477503be02859a41bd3186d1c2666ed7abc64f..8c6fa5ef2e5a92b0cbf391a8ca9a17efa663148a 100644
 #define read_barrier() asm volatile("bcr 15,0" : : : "memory")
 #define write_barrier()        asm volatile("bcr 15,0" : : : "memory")
 
+typedef struct {
+       volatile unsigned int lock;
+} spinlock_t;
+
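+/*
+ * COMPARE AND SWAP: if *lock still holds "old", atomically replace it with
+ * "new".  Returns the value actually found in *lock, so a return equal to
+ * the expected "old" (0 in spin_lock below) means the swap happened.
+ */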
+static inline int
+_raw_compare_and_swap(volatile unsigned int *lock,
+                     unsigned int old, unsigned int new)
+{
+       __asm__ __volatile__(
+               "       cs      %0,%3,0(%4)"
+               : "=d" (old), "=m" (*lock)
+               : "0" (old), "d" (new), "a" (lock), "m" (*lock)
+               : "cc", "memory" );
+
+       return old;
+}
+
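+/*
+ * Fast path is a single CAS.  Under contention, spin on plain loads until
+ * the lock reads free, then retry the CAS, so the cacheline stays shared
+ * while waiting.
+ */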
+static inline void spin_lock(spinlock_t *lock)
+{
+       if (!_raw_compare_and_swap(&lock->lock, 0, 0x80000000))
+               return;
+
+       while (1) {
+               if (lock->lock)
+                       continue;
+               if (!_raw_compare_and_swap(&lock->lock, 0, 0x80000000))
+                       break;
+       }
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+       _raw_compare_and_swap(&lock->lock, 0x80000000, 0);
+}
+
+#define __SPIN_LOCK_UNLOCKED   { 0 }
+
 #endif
arch/arch-sparc.h
index cd552ab7c94fd8ce613e64b38739a4be6741c2f7..de675daeba0fb497c732a4028b5a6930a3e1e159 100644
 #define read_barrier() __asm__ __volatile__ ("" : : : "memory")
 #define write_barrier()        __asm__ __volatile__ ("" : : : "memory")
 
+typedef struct {
+       volatile unsigned char lock;
+} spinlock_t;
+
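+/*
+ * ldstub atomically reads the lock byte and sets it to 0xff; a zero result
+ * in %g2 means we took the lock.  The .subsection 2 path read-spins with
+ * ldub until the byte clears, then branches back to retry.
+ */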
+static inline void spin_lock(spinlock_t *lock)
+{
+       __asm__ __volatile__(
+               "\n1:\n\t"
+               "ldstub [%0], %%g2\n\t"
+               "orcc   %%g2, 0x0, %%g0\n\t"
+               "bne,a  2f\n\t"
+               " ldub  [%0], %%g2\n\t"
+               ".subsection    2\n"
+               "2:\n\t"
+               "orcc   %%g2, 0x0, %%g0\n\t"
+               "bne,a  2b\n\t"
+               " ldub  [%0], %%g2\n\t"
+               "b,a    1b\n\t"
+               ".previous\n"
+               : /* no outputs */
+               : "r" (lock)
+               : "g2", "memory", "cc");
+}
+
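+/* Release by storing zero (%g0) to the lock byte. */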
+static inline void spin_unlock(spinlock_t *lock)
+{
+       __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
+}
+
+#define __SPIN_LOCK_UNLOCKED   { 0 }
+
 #endif
arch/arch-sparc64.h
index 332cf9179388377ebfc12bf85458f1dd6b73d5ea..6ee8659716f4654a28a00f34fef4f1313191ca1d 100644
 #define read_barrier()         membar_safe("#LoadLoad")
 #define write_barrier()                membar_safe("#StoreStore")
 
+typedef struct {
+       volatile unsigned char lock;
+} spinlock_t;
+
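+/*
+ * Same ldstub protocol as 32-bit sparc, with explicit membars: the
+ * #StoreLoad | #StoreStore after ldstub gives acquire ordering, and the
+ * out-of-line path read-spins on ldub until the byte clears.
+ */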
+static inline void spin_lock(spinlock_t *lock)
+{
+       unsigned long tmp;
+
+       __asm__ __volatile__(
+               "1:     ldstub          [%1], %0\n"
+               "       membar          #StoreLoad | #StoreStore\n"
+               "       brnz,pn         %0, 2f\n"
+               "        nop\n"
+               "       .subsection     2\n"
+               "2:     ldub            [%1], %0\n"
+               "       membar          #LoadLoad\n"
+               "       brnz,pt         %0, 2b\n"
+               "        nop\n"
+               "       ba,a,pt         %%xcc, 1b\n"
+               "       .previous"
+               : "=&r" (tmp)
+               : "r" (lock)
+               : "memory");
+}
+
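+/* Order the critical section before the releasing byte store. */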
+static inline void spin_unlock(spinlock_t *lock)
+{
+       __asm__ __volatile__(
+               "       membar          #StoreStore | #LoadStore\n"
+               "       stb             %%g0, [%0]"
+               : /* No outputs */
+               : "r" (lock)
+               : "memory");
+}
+
+#define __SPIN_LOCK_UNLOCKED   { 0 }
+
 #endif
arch/arch-x86.h
index 97262ffe0769f9a445fe3042031867987350d9a2..b9249eae6f449d4f04e6fc2e6f25873eb9763527 100644
@@ -40,4 +40,41 @@ static inline unsigned long arch_ffz(unsigned long bitmask)
 }
 #define ARCH_HAVE_FFZ
 
+typedef struct {
+       unsigned int lock;
+} spinlock_t;
+
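+/*
+ * Byte ticket lock: "lock ; xaddw" atomically takes a ticket (high byte
+ * of %0) and reads the current owner (low byte); spin with rep;nop (pause)
+ * until the owner byte matches our ticket.  spin_unlock hands the lock to
+ * the next waiter by bumping the owner byte with incb.
+ */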
+static inline void spin_lock(spinlock_t *lock)
+{
+       short inc = 0x0100;
+
+       __asm__ __volatile__("xaddw %w0, %1\n"
+                       "1:\t"
+                       "cmpb %h0, %b0\n\t"
+                       "je 2f\n\t"
+                       "rep ; nop\n\t"
+                       "movb %1, %b0\n\t"
+                       "jmp 1b\n"
+                       "2:"
+                       : "+Q" (inc), "+m" (lock->lock)
+                       :
+                       : "memory", "cc");
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+       __asm__ __volatile__("incb %0"
+                       : "+m" (lock->lock)
+                       :
+                       : "memory", "cc");
+}
+
+#define __SPIN_LOCK_UNLOCKED   { 0 }
+
 #endif
arch/arch-x86_64.h
index 216e74e2e32a409d1d7bbee30aa38f771cafdb40..71ba2240180f8937182725812529782f58dd8873 100644
@@ -40,4 +40,41 @@ static inline unsigned int arch_ffz(unsigned int bitmask)
 }
 #define ARCH_HAVE_FFZ
 
+typedef struct {
+       unsigned int lock;
+} spinlock_t;
+
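+/*
+ * Byte ticket lock: "lock ; xaddw" atomically takes a ticket (high byte
+ * of %0) and reads the current owner (low byte); spin with rep;nop (pause)
+ * until the owner byte matches our ticket.  spin_unlock hands the lock to
+ * the next waiter by bumping the owner byte with incb.
+ */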
+static inline void spin_lock(spinlock_t *lock)
+{
+       short inc = 0x0100;
+
+       __asm__ __volatile__("xaddw %w0, %1\n"
+                       "1:\t"
+                       "cmpb %h0, %b0\n\t"
+                       "je 2f\n\t"
+                       "rep ; nop\n\t"
+                       "movb %1, %b0\n\t"
+                       "jmp 1b\n"
+                       "2:"
+                       : "+Q" (inc), "+m" (lock->lock)
+                       :
+                       : "memory", "cc");
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+       __asm__ __volatile__("incb %0"
+                       : "+m" (lock->lock)
+                       :
+                       : "memory", "cc");
+}
+
+#define __SPIN_LOCK_UNLOCKED   { 0 }
+
 #endif
arch/arch.h
index f4eb855ebf340d8a002759c8384323cd63218a97..00163570f82c04f9de2825af424fae28b5d43a2d 100644
@@ -38,4 +38,9 @@ enum {
 #include "../lib/ffz.h"
 #endif
 
+static inline void spin_lock_init(spinlock_t *lock)
+{
+       lock->lock = 0;
+}
+
 #endif
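
A minimal usage sketch, assuming arch/arch.h can be included on its own and
picks the right per-arch header; counter_setup(), counter_add(),
counter_lock and shared_counter are illustrative names, not part of the
commit:

	#include "arch/arch.h"

	static spinlock_t counter_lock;
	static unsigned long shared_counter;

	void counter_setup(void)
	{
		spin_lock_init(&counter_lock);	/* lock starts out released */
	}

	void counter_add(unsigned long val)
	{
		spin_lock(&counter_lock);	/* busy-wait until acquired */
		shared_counter += val;		/* critical section */
		spin_unlock(&counter_lock);	/* release; pairs with the acquire */
	}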