From 69ebbd397e80b23ec4281c99e9f7242a089cc771 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 12 Jun 2008 11:26:26 +0200
Subject: [PATCH] Add spinlocks

Signed-off-by: Jens Axboe
---
 arch/arch-alpha.h   | 31 +++++++++++++++++++++++++++++++
 arch/arch-ia64.h    | 34 ++++++++++++++++++++++++++++++++++
 arch/arch-ppc.h     | 34 ++++++++++++++++++++++++++++++++++
 arch/arch-s390.h    | 35 +++++++++++++++++++++++++++++++++++
 arch/arch-sparc.h   | 29 +++++++++++++++++++++++++++++
 arch/arch-sparc64.h | 35 +++++++++++++++++++++++++++++++++++
 arch/arch-x86.h     | 31 +++++++++++++++++++++++++++++++
 arch/arch-x86_64.h  | 31 +++++++++++++++++++++++++++++++
 arch/arch.h         |  5 +++++
 9 files changed, 265 insertions(+)

diff --git a/arch/arch-alpha.h b/arch/arch-alpha.h
index ef14437c..a5e3801c 100644
--- a/arch/arch-alpha.h
+++ b/arch/arch-alpha.h
@@ -22,4 +22,35 @@
 #define read_barrier()	__asm__ __volatile__("mb": : :"memory")
 #define write_barrier()	__asm__ __volatile__("wmb": : :"memory")
 
+typedef struct {
+	volatile unsigned int lock;
+} spinlock_t;
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	long tmp;
+
+	__asm__ __volatile__("1:	ldl_l	%0,%1\n"
+			     "	bne	%0,2f\n"
+			     "	lda	%0,1\n"
+			     "	stl_c	%0,%1\n"
+			     "	beq	%0,2f\n"
+			     "	mb\n"
+			     ".subsection 2\n"
+			     "2:	ldl	%0,%1\n"
+			     "	bne	%0,2b\n"
+			     "	br	1b\n"
+			     ".previous"
+			     : "=&r" (tmp), "=m" (lock->lock)
+			     : "m" (lock->lock) : "memory");
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+	read_barrier();
+	lock->lock = 0;
+}
+
+#define __SPIN_LOCK_UNLOCKED	{ 0 }
+
 #endif
diff --git a/arch/arch-ia64.h b/arch/arch-ia64.h
index 2f926849..d9afc32e 100644
--- a/arch/arch-ia64.h
+++ b/arch/arch-ia64.h
@@ -35,4 +35,38 @@ static inline unsigned long arch_ffz(unsigned long bitmask)
 }
 #define ARCH_HAVE_FFZ
 
+typedef struct {
+	volatile unsigned int lock;
+} spinlock_t;
+
+#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	register volatile unsigned int *ptr asm ("r31") = &lock->lock;
+	unsigned long flags = 0;
+
+	__asm__ __volatile__("{\n\t"
+			     "  mov ar.ccv = r0\n\t"
+			     "  mov r28 = ip\n\t"
+			     "  mov r30 = 1;;\n\t"
+			     "}\n\t"
+			     "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
+			     "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
+			     "cmp4.ne p14, p0 = r30, r0\n\t"
+			     "mov b6 = r29;;\n\t"
+			     "mov r27=%2\n\t"
+			     "(p14) br.cond.spnt.many b6"
+			     : "=r"(ptr) : "r"(ptr), "r" (flags)
+			     : IA64_SPINLOCK_CLOBBERS);
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+	read_barrier();
+	__asm__ __volatile__("st4.rel.nta [%0] = r0\n\t" :: "r" (lock));
+}
+
+#define __SPIN_LOCK_UNLOCKED	{ 0 }
+
 #endif
diff --git a/arch/arch-ppc.h b/arch/arch-ppc.h
index 06115387..4839122e 100644
--- a/arch/arch-ppc.h
+++ b/arch/arch-ppc.h
@@ -44,4 +44,38 @@ static inline int arch_ffz(unsigned long bitmask)
 }
 #define ARCH_HAVE_FFZ
 
+typedef struct {
+	volatile unsigned int lock;
+} spinlock_t;
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+	unsigned long tmp, token = 1;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2\n\
+	cmpwi	0,%0,0\n\
+	bne-	2f\n\
+	stwcx.	%1,0,%2\n\
+	bne-	1b\n\
+	isync\n\
+2:"	: "=&r" (tmp)
+	: "r" (token), "r" (&lock->lock)
+	: "cr0", "memory");
+
+	return tmp;
+}
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	while (spin_trylock(lock))
+		;
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+	read_barrier();
+	lock->lock = 0;
+}
+
 #endif
diff --git a/arch/arch-s390.h b/arch/arch-s390.h
index 06477503..8c6fa5ef 100644
--- a/arch/arch-s390.h
+++ b/arch/arch-s390.h
@@ -22,4 +22,39 @@
 #define read_barrier()	asm volatile("bcr 15,0" : : : "memory")
 #define write_barrier()	asm volatile("bcr 15,0" : : : "memory")
 
+typedef struct {
+	volatile unsigned int lock;
+} spinlock_t;
+
+static inline int
+_raw_compare_and_swap(volatile unsigned int *lock,
+		      unsigned int old, unsigned int new)
+{
+	__asm__ __volatile__(
+		"	cs	%0,%3,0(%4)"
+		: "=d" (old), "=m" (*lock)
+		: "0" (old), "d" (new), "a" (lock), "m" (*lock)
+		: "cc", "memory");
+
+	return old;
+}
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	if (!_raw_compare_and_swap(&lock->lock, 0, 0x80000000))
+		return;
+
+	while (1) {
+		if (lock->lock)
+			continue;
+		if (!_raw_compare_and_swap(&lock->lock, 0, 0x80000000))
+			break;
+	}
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+	_raw_compare_and_swap(&lock->lock, 0x80000000, 0);
+}
+
 #endif
diff --git a/arch/arch-sparc.h b/arch/arch-sparc.h
index cd552ab7..de675dae 100644
--- a/arch/arch-sparc.h
+++ b/arch/arch-sparc.h
@@ -23,4 +23,33 @@
 #define read_barrier()	__asm__ __volatile__ ("" : : : "memory")
 #define write_barrier()	__asm__ __volatile__ ("" : : : "memory")
 
+typedef struct {
+	volatile unsigned char lock;
+} spinlock_t;
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	__asm__ __volatile__(
+	"\n1:\n\t"
+	"ldstub	[%0], %%g2\n\t"
+	"orcc	%%g2, 0x0, %%g0\n\t"
+	"bne,a	2f\n\t"
+	" ldub	[%0], %%g2\n\t"
+	".subsection	2\n"
+	"2:\n\t"
+	"orcc	%%g2, 0x0, %%g0\n\t"
+	"bne,a	2b\n\t"
+	" ldub	[%0], %%g2\n\t"
+	"b,a	1b\n\t"
+	".previous\n"
+	: /* no outputs */
+	: "r" (lock)
+	: "g2", "memory", "cc");
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
+}
+
 #endif
diff --git a/arch/arch-sparc64.h b/arch/arch-sparc64.h
index 332cf917..6ee86597 100644
--- a/arch/arch-sparc64.h
+++ b/arch/arch-sparc64.h
@@ -30,4 +30,39 @@
 #define read_barrier()	membar_safe("#LoadLoad")
 #define write_barrier()	membar_safe("#StoreStore")
 
+typedef struct {
+	volatile unsigned char lock;
+} spinlock_t;
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"1:	ldstub		[%1], %0\n"
+"	membar		#StoreLoad | #StoreStore\n"
+"	brnz,pn		%0, 2f\n"
+"	 nop\n"
+"	.subsection	2\n"
+"2:	ldub		[%1], %0\n"
+"	membar		#LoadLoad\n"
+"	brnz,pt		%0, 2b\n"
+"	 nop\n"
+"	ba,a,pt		%%xcc, 1b\n"
+"	.previous"
+	: "=&r" (tmp)
+	: "r" (lock)
+	: "memory");
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+	__asm__ __volatile__(
+"	membar		#StoreStore | #LoadStore\n"
+"	stb		%%g0, [%0]"
+	: /* No outputs */
+	: "r" (lock)
+	: "memory");
+}
+
 #endif
diff --git a/arch/arch-x86.h b/arch/arch-x86.h
index 97262ffe..b9249eae 100644
--- a/arch/arch-x86.h
+++ b/arch/arch-x86.h
@@ -40,4 +40,35 @@ static inline unsigned long arch_ffz(unsigned long bitmask)
 }
 #define ARCH_HAVE_FFZ
 
+typedef struct {
+	unsigned int lock;
+} spinlock_t;
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	short inc = 0x0100;
+
+	__asm__ __volatile__("lock ; xaddw %w0, %1\n"
+			     "1:\t"
+			     "cmpb %h0, %b0\n\t"
+			     "je 2f\n\t"
+			     "rep ; nop\n\t"
+			     "movb %1, %b0\n\t"
+			     "jmp 1b\n"
+			     "2:"
+ : "+Q" (inc), "+m" (lock->lock) + : + : "memory", "cc"); +} + +static inline void spin_unlock(spinlock_t *lock) +{ + __asm__ __volatile__("incb %0" + : "+m" (lock->lock) + : + : "memory", "cc"); +} + +#define __SPIN_LOCK_UNLOCKED { 0 } + #endif diff --git a/arch/arch-x86_64.h b/arch/arch-x86_64.h index 216e74e2..71ba2240 100644 --- a/arch/arch-x86_64.h +++ b/arch/arch-x86_64.h @@ -40,4 +40,35 @@ static inline unsigned int arch_ffz(unsigned int bitmask) } #define ARCH_HAVE_FFZ +typedef struct { + unsigned int lock; +} spinlock_t; + +static inline void spin_lock(spinlock_t *lock) +{ + short inc = 0x0100; + + __asm__ __volatile__("xaddw %w0, %1\n" + "1:\t" + "cmpb %h0, %b0\n\t" + "je 2f\n\t" + "rep ; nop\n\t" + "movb %1, %b0\n\t" + "jmp 1b\n" + "2:" + : "+Q" (inc), "+m" (lock->lock) + : + : "memory", "cc"); +} + +static inline void spin_unlock(spinlock_t *lock) +{ + __asm__ __volatile__("incb %0" + : "+m" (lock->lock) + : + : "memory", "cc"); +} + +#define __SPIN_LOCK_UNLOCKED { 0 } + #endif diff --git a/arch/arch.h b/arch/arch.h index f4eb855e..00163570 100644 --- a/arch/arch.h +++ b/arch/arch.h @@ -38,4 +38,9 @@ enum { #include "../lib/ffz.h" #endif +static inline void spin_lock_init(spinlock_t *lock) +{ + lock->lock = 0; +} + #endif -- 2.25.1