#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

#include <asm/processor.h>	/* for cpu_relax() */
#include <asm/barrier.h>

/*
 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * In the debug case, 1 means unlocked, 0 means locked. (the values
 * are inverted, to catch initialization bugs)
 *
 * No atomicity anywhere, we are on UP. However, we still need
 * the compiler barriers, because we do not want the compiler to
 * move potentially faulting instructions (notably user accesses)
 * into the locked sequence, resulting in non-atomic execution.
 */
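
/*
 * Consequence of the inverted encoding: a lock that is only zero-filled
 * (i.e. never set up via spin_lock_init()) reads as "locked" here, so
 * arch_spin_trylock() on it fails and arch_spin_unlock_wait() keeps
 * spinning, instead of the missing initialization going unnoticed.
 */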

#ifdef CONFIG_DEBUG_SPINLOCK
#define arch_spin_is_locked(x)		((x)->slock == 0)

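/*
 * Busy-wait, with acquire ordering, until ->slock becomes non-zero,
 * i.e. until the current holder drops the lock.
 */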
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->slock, VAL);
}

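/*
 * "Take" the lock by recording it as held; barrier() is what keeps the
 * compiler from moving memory accesses across the lock boundary (see
 * the comment at the top of this file).
 */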
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	lock->slock = 0;
	barrier();
}

static inline void
arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	local_irq_save(flags);
	lock->slock = 0;
	barrier();
}

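/*
 * Succeeds only if the lock was unlocked (->slock was 1); takes it by
 * writing 0.
 */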
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	char oldval = lock->slock;

	lock->slock = 0;
	barrier();

	return oldval > 0;
}

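/*
 * Release: barrier() comes first so the compiler cannot sink
 * critical-section accesses past the store that marks the lock free.
 */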
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	barrier();
	lock->slock = 1;
}

/*
 * Read-write spinlocks. No debug version.
 */
#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_read_trylock(lock)		({ barrier(); (void)(lock); 1; })
#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_unlock(lock)		do { barrier(); (void)(lock); } while (0)

#else /* DEBUG_SPINLOCK */
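/*
 * Without CONFIG_DEBUG_SPINLOCK there is no lock state at all on UP:
 * every operation reduces to a compiler barrier, and the (void)(lock)
 * casts merely keep the argument evaluated so callers do not trigger
 * "unused variable" warnings.
 */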
#define arch_spin_is_locked(lock)	((void)(lock), 0)
#define arch_spin_unlock_wait(lock)	do { barrier(); (void)(lock); } while (0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock)		do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */

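/*
 * On UP there is never another CPU to contend with, so a spinlock is
 * never contended and read/write locks can always be taken.
 */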
#define arch_spin_is_contended(lock)	(((void)(lock), 0))

#define arch_read_can_lock(lock)	(((void)(lock), 1))
#define arch_write_can_lock(lock)	(((void)(lock), 1))

#endif /* __LINUX_SPINLOCK_UP_H */