/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

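/*
 * Sanity check on the lock word: andcm,= computes
 * lock_val & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL and nullifies the following
 * instruction when the result is zero.  Both valid states (0 == locked,
 * __ARCH_SPIN_LOCK_UNLOCKED_VAL == unlocked) yield zero, so the embedded
 * SPINLOCK_BREAK_INSN traps only when the lock word holds an unexpected
 * value, i.e. appears to have been overwritten.
 */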
static inline void arch_spin_val_check(int lock_val)
{
	if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
		asm volatile(	"andcm,=	%0,%1,%%r0\n"
				".word %2\n"
		: : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
			"i" (SPINLOCK_BREAK_INSN));
}

static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int lock_val;

	a = __ldcw_align(x);
	lock_val = READ_ONCE(*a);
	arch_spin_val_check(lock_val);
	return (lock_val == 0);
}

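/*
 * PA-RISC's only atomic read-modify-write primitive is ldcw (load word
 * and clear): it atomically returns the old value of a word and stores
 * zero to it.  A lock word is therefore "free" when nonzero and "taken"
 * when zero, the opposite of most architectures.  __ldcw_align() exists
 * because ldcw requires a 16-byte aligned operand on older CPUs.
 */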
static inline void arch_spin_lock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	do {
		int lock_val_old;

		lock_val_old = __ldcw(a);
		arch_spin_val_check(lock_val_old);
		if (lock_val_old)
			return;	/* got lock */

		/* wait until we should try to get lock again */
		while (*a == 0)
			continue;
	} while (1);
}

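/*
 * Note on the release below: stw,ma with a zero displacement appears to
 * share its encoding with the PA 2.0 ordered store (stw,o), while on
 * PA 1.x modifying the address by zero makes it behave as a plain store;
 * the "memory" clobber additionally keeps the compiler from sinking
 * critical-section accesses past the unlock.
 */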
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	/* Release with ordered store. */
	__asm__ __volatile__("stw,ma %0,0(%1)"
		: : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int lock_val;

	a = __ldcw_align(x);
	lock_val = __ldcw(a);
	arch_spin_val_check(lock_val);
	return lock_val != 0;
}

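/*
 * Illustrative usage sketch: these routines are the arch-level hooks
 * behind the generic spinlock API, so callers normally go through that
 * layer instead (the lock name here is hypothetical):
 *
 *	static DEFINE_SPINLOCK(demo_lock);
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&demo_lock, flags);
 *	... critical section ...
 *	spin_unlock_irqrestore(&demo_lock, flags);
 */
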
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Locking is unfair: writers can be starved indefinitely by readers.
 *
 * The rwlock state lives in @counter, and access to it is serialized
 * with @lock_mutex.
 */

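/*
 * Counter states, as implied by the routines below:
 *
 *	counter == __ARCH_RW_LOCK_UNLOCKED__	lock is free
 *	0 < counter < __ARCH_RW_LOCK_UNLOCKED__	held by (__ARCH_RW_LOCK_UNLOCKED__ - counter) readers
 *	counter == 0				held by one writer
 */
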
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * Zero means a writer holds the lock exclusively: deny the reader.
	 * Otherwise grant the lock to the first/subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

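/*
 * Presumably interrupts are disabled around @lock_mutex (here and in the
 * other rwlock routines) so that an interrupt handler on this CPU taking
 * the same rwlock cannot deadlock against the mutex we already hold.
 */
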
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers:
	 * they can be starved for an indefinite time by readers.
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

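/*
 * Illustrative usage sketch, mirroring the spinlock example above; the
 * generic rwlock API maps onto these hooks (lock name hypothetical):
 *
 *	static DEFINE_RWLOCK(demo_rwlock);
 *
 *	read_lock(&demo_rwlock);
 *	... multiple readers may hold this concurrently ...
 *	read_unlock(&demo_rwlock);
 *
 *	write_lock(&demo_rwlock);
 *	... exclusive access ...
 *	write_unlock(&demo_rwlock);
 */
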
#endif /* __ASM_SPINLOCK_H */