#define read_barrier() __asm__ __volatile__("mb": : :"memory")
#define write_barrier() __asm__ __volatile__("wmb": : :"memory")
+typedef struct {
+ volatile unsigned int lock;
+} spinlock_t;
+
+static inline void spin_lock(spinlock_t *lock)
+{
+ long tmp;
+
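+ /*
+  * LL/SC acquire loop: ldl_l/stl_c atomically set the lock word to 1
+  * if it was 0.  On contention or a failed store-conditional, the
+  * out-of-line .subsection 2 code spins on plain loads until the lock
+  * looks free, then branches back to retry.  The trailing mb gives
+  * acquire ordering once the lock is taken.
+  */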
+ __asm__ __volatile__("1: ldl_l %0,%1\n"
+ " bne %0,2f\n"
+ " lda %0,1\n"
+ " stl_c %0,%1\n"
+ " beq %0,2f\n"
+ " mb\n"
+ ".subsection 2\n"
+ "2: ldl %0,%1\n"
+ " bne %0,2b\n"
+ " br 1b\n"
+ ".previous"
+ : "=&r" (tmp), "=m" (lock->lock)
+ : "m"(lock->lock) : "memory");
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
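+ /* read_barrier() is a full "mb" on alpha; it orders the critical
+  * section before the releasing store below. */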
+ read_barrier();
+ lock->lock = 0;
+}
+
+#define __SPIN_LOCK_UNLOCKED { 0 }
+
#endif
}
#define ARCH_HAVE_FFZ
+typedef struct {
+ volatile unsigned int lock;
+} spinlock_t;
+
+static inline void spin_lock(spinlock_t *lock)
+{
+ unsigned int prev;
+
+ /*
+  * Spin on cmpxchg4.acq: ar.ccv holds the expected value (0) and
+  * the cmpxchg atomically stores 1 with acquire semantics if the
+  * lock word still matches it.  Loop until the old value was 0,
+  * i.e. until we took a free lock.
+  */
+ do {
+  __asm__ __volatile__("mov ar.ccv = r0;;\n\t"
+        "cmpxchg4.acq %0 = [%1], %2, ar.ccv"
+        : "=r" (prev)
+        : "r" (&lock->lock), "r" (1)
+        : "ar.ccv", "memory");
+ } while (prev);
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+ /*
+  * st4.rel has release semantics, so the critical section cannot
+  * be reordered past the clearing store; no extra barrier needed.
+  */
+ __asm__ __volatile__("st4.rel.nta [%0] = r0\n\t" :: "r" (lock) : "memory");
+}
+
+#define __SPIN_LOCK_UNLOCKED { 0 }
+
#endif
}
#define ARCH_HAVE_FFZ
+typedef struct {
+ volatile unsigned int lock;
+} spinlock_t;
+
+static inline unsigned long spin_trylock(spinlock_t *lock)
+{
+ unsigned long tmp, token;
+
+ /*
+  * lwarx/stwcx. loop: load-reserve the lock word and, if it is 0,
+  * try to store the lock token (1).  isync on the success path
+  * gives acquire ordering.  Returns the previous lock value, so 0
+  * means the lock was free and is now ours.
+  */
+ token = 1;
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2\n\
+ cmpwi 0,%0,0\n\
+ bne- 2f\n\
+ stwcx. %1,0,%2\n\
+ bne- 1b\n\
+ isync\n\
+2:" : "=&r" (tmp)
+ : "r" (token), "r" (&lock->lock)
+ : "cr0", "memory");
+
+ return tmp;
+}
+
+static inline void spin_lock(spinlock_t *lock)
+{
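+ /* spin_trylock() returns the old lock value: nonzero means the
+  * lock was held, so keep retrying until it was free. */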
+ while (spin_trylock(lock))
+ ;
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+ read_barrier();
+ lock->lock = 0;
+}
+
+#define __SPIN_LOCK_UNLOCKED { 0 }
+
#endif
#define read_barrier() asm volatile("bcr 15,0" : : : "memory")
#define write_barrier() asm volatile("bcr 15,0" : : : "memory")
+typedef struct {
+ volatile unsigned int lock;
+} spinlock_t;
+
+static inline int
+_raw_compare_and_swap(volatile unsigned int *lock,
+ unsigned int old, unsigned int new)
+{
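+ /*
+  * "cs" (compare and swap) compares %0 with the lock word and, if
+  * equal, stores the new value; otherwise it loads the current
+  * lock word into %0.  Returning the old value lets the caller
+  * detect success: the value passed in comes back unchanged.
+  */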
+ __asm__ __volatile__(
+ " cs %0,%3,0(%4)"
+ : "=d" (old), "=m" (*lock)
+ : "0" (old), "d" (new), "a" (lock), "m" (*lock)
+ : "cc", "memory" );
+
+ return old;
+}
+
+static inline void spin_lock(spinlock_t *lock)
+{
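+ /*
+  * Fast path: try to swap in the owner token (0x80000000) right
+  * away.  On contention, spin on plain loads until the lock reads
+  * free, then retry the compare-and-swap.
+  */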
+ if (!_raw_compare_and_swap(&lock->lock, 0, 0x80000000))
+ return;
+
+ while (1) {
+ if (lock->lock)
+ continue;
+ if (!_raw_compare_and_swap(&lock->lock, 0, 0x80000000))
+ break;
+ }
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+ _raw_compare_and_swap(&lock->lock, 0x80000000, 0);
+}
+
+#define __SPIN_LOCK_UNLOCKED { 0 }
+
#endif
#define read_barrier() __asm__ __volatile__ ("" : : : "memory")
#define write_barrier() __asm__ __volatile__ ("" : : : "memory")
+typedef struct {
+ volatile unsigned char lock;
+} spinlock_t;
+
+static inline void spin_lock(spinlock_t *lock)
+{
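+ /*
+  * ldstub atomically sets the lock byte to 0xff and returns the
+  * old value in %g2; zero means we got the lock.  The out-of-line
+  * .subsection 2 code spins on plain loads until the byte reads 0,
+  * then branches back to retry.
+  */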
+ __asm__ __volatile__(
+ "\n1:\n\t"
+ "ldstub [%0], %%g2\n\t"
+ "orcc %%g2, 0x0, %%g0\n\t"
+ "bne,a 2f\n\t"
+ " ldub [%0], %%g2\n\t"
+ ".subsection 2\n"
+ "2:\n\t"
+ "orcc %%g2, 0x0, %%g0\n\t"
+ "bne,a 2b\n\t"
+ " ldub [%0], %%g2\n\t"
+ "b,a 1b\n\t"
+ ".previous\n"
+ : /* no outputs */
+ : "r" (lock)
+ : "g2", "memory", "cc");
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+ __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
+}
+
+#define __SPIN_LOCK_UNLOCKED { 0 }
+
#endif
#define read_barrier() membar_safe("#LoadLoad")
#define write_barrier() membar_safe("#StoreStore")
+typedef struct {
+ volatile unsigned char lock;
+} spinlock_t;
+
+static inline void spin_lock(spinlock_t *lock)
+{
+ unsigned long tmp;
+
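+ /*
+  * ldstub grabs the lock byte; on failure the out-of-line code
+  * spins on ldub until the byte reads 0, then branches back to
+  * retry.  The membar after ldstub provides acquire ordering.
+  */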
+ __asm__ __volatile__(
+ "1: ldstub [%1], %0\n"
+ " membar #StoreLoad | #StoreStore\n"
+ " brnz,pn %0, 2f\n"
+ " nop\n"
+ " .subsection 2\n"
+ "2: ldub [%1], %0\n"
+ " membar #LoadLoad\n"
+ " brnz,pt %0, 2b\n"
+ " nop\n"
+ " ba,a,pt %%xcc, 1b\n"
+ " .previous"
+ : "=&r" (tmp)
+ : "r" (lock)
+ : "memory");
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
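+ /* the membar orders critical-section loads and stores before the
+  * releasing store of zero */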
+ __asm__ __volatile__(
+ " membar #StoreStore | #LoadStore\n"
+ " stb %%g0, [%0]"
+ : /* No outputs */
+ : "r" (lock)
+ : "memory");
+}
+
+#define __SPIN_LOCK_UNLOCKED { 0 }
+
#endif
}
#define ARCH_HAVE_FFZ
+typedef struct {
+ unsigned int lock;
+} spinlock_t;
+
+static inline void spin_lock(spinlock_t *lock)
+{
+ short inc = 0x0100;
+
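+ /*
+  * Ticket lock: the locked xaddw atomically takes the next ticket
+  * (high byte) while fetching the current owner (low byte), then
+  * spins with "rep ; nop" (pause) until owner == our ticket.
+  */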
+ __asm__ __volatile__("xaddw %w0, %1\n"
+ "1:\t"
+ "cmpb %h0, %b0\n\t"
+ "je 2f\n\t"
+ "rep ; nop\n\t"
+ "movb %1, %b0\n\t"
+ "jmp 1b\n"
+ "2:"
+ : "+Q" (inc), "+m" (lock->lock)
+ :
+ : "memory", "cc");
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
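+ /* release: bump the owner byte so the next ticket proceeds.  Only
+  * the lock holder writes this byte, so a plain incb (no lock
+  * prefix) is sufficient. */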
+ __asm__ __volatile__("incb %0"
+ : "+m" (lock->lock)
+ :
+ : "memory", "cc");
+}
+
+#define __SPIN_LOCK_UNLOCKED { 0 }
+
#endif
}
#define ARCH_HAVE_FFZ
+typedef struct {
+ unsigned int lock;
+} spinlock_t;
+
+static inline void spin_lock(spinlock_t *lock)
+{
+ short inc = 0x0100;
+
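+ /* identical ticket lock to the 32-bit variant: locked xaddw takes
+  * a ticket, then spin until the owner byte matches it */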
+ __asm__ __volatile__("xaddw %w0, %1\n"
+ "1:\t"
+ "cmpb %h0, %b0\n\t"
+ "je 2f\n\t"
+ "rep ; nop\n\t"
+ "movb %1, %b0\n\t"
+ "jmp 1b\n"
+ "2:"
+ : "+Q" (inc), "+m" (lock->lock)
+ :
+ : "memory", "cc");
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+ __asm__ __volatile__("incb %0"
+ : "+m" (lock->lock)
+ :
+ : "memory", "cc");
+}
+
+#define __SPIN_LOCK_UNLOCKED { 0 }
+
#endif
#include "../lib/ffz.h"
#endif
+static inline void spin_lock_init(spinlock_t *lock)
+{
+ lock->lock = 0;
+}
+
#endif