Commit | Line | Data |
---|---|---|
c942fddf | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
a33fda35 WL | 2 | /*
3 | * Queued spinlock | |
4 | * | |
a33fda35 | 5 | * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. |
81d3dc9a | 6 | * (C) Copyright 2013-2014,2018 Red Hat, Inc. |
a33fda35 | 7 | * (C) Copyright 2015 Intel Corp. |
64d816cb | 8 | * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP |
a33fda35 | 9 | * |
81d3dc9a | 10 | * Authors: Waiman Long <longman@redhat.com> |
a33fda35 WL | 11 | * Peter Zijlstra <peterz@infradead.org>
12 | */ | |
a23db284 WL | 13 |
14 | #ifndef _GEN_PV_LOCK_SLOWPATH | |
15 | ||
a33fda35 WL | 16 | #include <linux/smp.h>
17 | #include <linux/bug.h> | |
18 | #include <linux/cpumask.h> | |
19 | #include <linux/percpu.h> | |
20 | #include <linux/hardirq.h> | |
21 | #include <linux/mutex.h> | |
5671360f | 22 | #include <linux/prefetch.h> |
69f9cae9 | 23 | #include <asm/byteorder.h> |
a33fda35 WL | 24 | #include <asm/qspinlock.h>
25 | ||
81d3dc9a WL | 26 | /*
27 | * Include queued spinlock statistics code | |
28 | */ | |
29 | #include "qspinlock_stat.h" | |
30 | ||
a33fda35 WL | 31 | /*
32 | * The basic principle of a queue-based spinlock can best be understood | |
33 | * by studying a classic queue-based spinlock implementation called the | |
34 | * MCS lock. The paper below provides a good description for this kind | |
35 | * of lock. | |
36 | * | |
37 | * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf | |
38 | * | |
39 | * This queued spinlock implementation is based on the MCS lock; however, to make
40 | * it fit the 4 bytes we assume spinlock_t to be, and preserve its existing | |
41 | * API, we must modify it somehow. | |
42 | * | |
43 | * In particular, where the traditional MCS lock consists of a tail pointer
44 | * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to | |
45 | * unlock the next pending (next->locked), we compress both these: {tail, | |
46 | * next->locked} into a single u32 value. | |
47 | * | |
48 | * A spinlock disables recursion within its own context, and there is a limit
49 | * to the contexts that can nest: task, softirq, hardirq and nmi. As there
50 | * are at most 4 nesting levels, the nesting level can be encoded in 2 bits. Now
51 | * we can encode the tail by combining the 2-bit nesting level with the cpu | |
52 | * number. With one byte for the lock value and 3 bytes for the tail, only a | |
53 | * 32-bit word is now needed. Even though we only need 1 bit for the lock, | |
54 | * we extend it to a full byte to achieve better performance for architectures | |
55 | * that support atomic byte write. | |
56 | * | |
57 | * We also change the first spinner to spin on the lock bit instead of its | |
58 | * node; thereby avoiding the need to carry a node from lock to unlock, and
59 | * preserving existing lock API. This also makes the unlock code simpler and | |
60 | * faster. | |
69f9cae9 PZI | 61 | *
62 | * N.B. The current implementation only supports architectures that allow | |
63 | * atomic operations on smaller 8-bit and 16-bit data types. | |
64 | * | |
a33fda35 WL | 65 | */
66 | ||
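The MCS principle referenced in the comment above is easiest to see standalone. Below is a minimal userspace sketch of a classic MCS lock using C11 atomics; the names and types are illustrative only and are not the kernel's (the code in this file further compresses the tail pointer and the hand-off flag into one 32-bit word, as described above).

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_bool locked;		/* set by our predecessor on hand-off */
};

struct mcs_lock {
	_Atomic(struct mcs_node *) tail;
};

static void mcs_acquire(struct mcs_lock *lock, struct mcs_node *node)
{
	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&node->locked, false, memory_order_relaxed);

	/* Publish ourselves as the new tail; the old tail is our predecessor. */
	struct mcs_node *prev =
		atomic_exchange_explicit(&lock->tail, node, memory_order_acq_rel);
	if (prev) {
		/* Queue was non-empty: link in and spin on our own node. */
		atomic_store_explicit(&prev->next, node, memory_order_release);
		while (!atomic_load_explicit(&node->locked, memory_order_acquire))
			;
	}
	/* prev == NULL: the lock was free and is now ours. */
}

static void mcs_release(struct mcs_lock *lock, struct mcs_node *node)
{
	struct mcs_node *next =
		atomic_load_explicit(&node->next, memory_order_acquire);

	if (!next) {
		/* No known successor: try to swing the tail back to empty. */
		struct mcs_node *expected = node;
		if (atomic_compare_exchange_strong_explicit(&lock->tail, &expected,
							    NULL,
							    memory_order_release,
							    memory_order_relaxed))
			return;
		/* A successor is mid-enqueue; wait for it to link itself. */
		while (!(next = atomic_load_explicit(&node->next,
						     memory_order_acquire)))
			;
	}
	/* Hand the lock to the next waiter. */
	atomic_store_explicit(&next->locked, true, memory_order_release);
}
```

Note how both the tail pointer and the per-node hand-off flag are full words here; the comment above describes how the kernel squeezes the equivalent state into the 4-byte lock word.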
67 | #include "mcs_spinlock.h" | |
0fa809ca | 68 | #define MAX_NODES 4 |
a33fda35 | 69 | |
0fa809ca WL | 70 | /*
71 | * On 64-bit architectures, the mcs_spinlock structure will be 16 bytes in | |
72 | * size and four of them will fit nicely in one 64-byte cacheline. For | |
73 | * pvqspinlock, however, we need more space for extra data. To accommodate | |
74 | * that, we insert two more long words to pad it up to 32 bytes. IOW, only | |
75 | * two of them can fit in a cacheline in this case. That is OK as it is rare | |
76 | * to have more than 2 levels of slowpath nesting in actual use. We don't | |
77 | * want to penalize pvqspinlocks to optimize for a rare case in native | |
78 | * qspinlocks. | |
79 | */ | |
80 | struct qnode { | |
81 | struct mcs_spinlock mcs; | |
a23db284 | 82 | #ifdef CONFIG_PARAVIRT_SPINLOCKS |
0fa809ca | 83 | long reserved[2]; |
a23db284 | 84 | #endif |
0fa809ca | 85 | }; |
a23db284 | 86 | |
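As a sanity check of the sizing argument above, here is a small self-contained sketch. The mock structures only assume the 16-byte, three-field mcs_spinlock layout from mcs_spinlock.h on a 64-bit ABI; they are not the kernel definitions themselves.

```c
#include <assert.h>

/* Stand-ins for the kernel structures, assuming a 64-bit ABI. */
struct mock_mcs_spinlock {
	struct mock_mcs_spinlock *next;	/* 8 bytes */
	int locked;			/* 4 bytes */
	int count;			/* 4 bytes */
};

struct mock_qnode {
	struct mock_mcs_spinlock mcs;
	long reserved[2];		/* the PV padding added above */
};

int main(void)
{
	/* Four 16-byte nodes per 64-byte line, but only two 32-byte PV nodes. */
	static_assert(sizeof(struct mock_mcs_spinlock) == 16, "expected 16-byte node");
	static_assert(sizeof(struct mock_qnode) == 32, "expected 32-byte PV node");
	static_assert(64 / sizeof(struct mock_mcs_spinlock) == 4, "4 per cacheline");
	static_assert(64 / sizeof(struct mock_qnode) == 2, "2 per cacheline");
	return 0;
}
```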
6512276d WD | 87 | /*
88 | * The pending bit spinning loop count. | |
89 | * This heuristic is used to limit the number of lockword accesses | |
90 | * made by atomic_cond_read_relaxed when waiting for the lock to | |
91 | * transition out of the "== _Q_PENDING_VAL" state. We don't spin | |
92 | * indefinitely because there's no guarantee that we'll make forward | |
93 | * progress. | |
94 | */ | |
95 | #ifndef _Q_PENDING_LOOPS | |
96 | #define _Q_PENDING_LOOPS 1 | |
97 | #endif | |
98 | ||
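The bounded wait that _Q_PENDING_LOOPS limits can be sketched in plain C11; this is only an illustration of the pattern, not the kernel's atomic_cond_read_relaxed() implementation, and the constants are placeholders.

```c
#include <stdatomic.h>
#include <stdint.h>

#define PENDING_VAL	0x100u	/* stand-in for _Q_PENDING_VAL */
#define PENDING_LOOPS	1	/* stand-in for _Q_PENDING_LOOPS */

/*
 * Re-read the lock word until it leaves the "pending, not yet locked"
 * hand-over state or the spin budget runs out; the caller then re-evaluates
 * and falls back to queueing if the word still looks contended.
 */
static uint32_t wait_for_pending_handover(_Atomic uint32_t *lockword)
{
	int cnt = PENDING_LOOPS;
	uint32_t val;

	do {
		val = atomic_load_explicit(lockword, memory_order_relaxed);
	} while (val == PENDING_VAL && cnt-- > 0);

	return val;
}
```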
a33fda35 WL | 99 | /*
100 | * Per-CPU queue node structures; we can never have more than 4 nested | |
101 | * contexts: task, softirq, hardirq, nmi. | |
102 | * | |
103 | * Exactly fits one 64-byte cacheline on a 64-bit architecture. | |
a23db284 WL | 104 | *
105 | * PV doubles the storage and uses the second cacheline for PV state. | |
a33fda35 | 106 | */ |
0fa809ca | 107 | static DEFINE_PER_CPU_ALIGNED(struct qnode, qnodes[MAX_NODES]); |
a33fda35 WL | 108 |
109 | /* | |
110 | * We must be able to distinguish between no-tail and the tail at 0:0, | |
111 | * therefore increment the cpu number by one. | |
112 | */ | |
113 | ||
8d53fa19 | 114 | static inline __pure u32 encode_tail(int cpu, int idx) |
a33fda35 WL | 115 | {
116 | u32 tail; | |
117 | ||
a33fda35 WL | 118 | tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
119 | tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */ | |
120 | ||
121 | return tail; | |
122 | } | |
123 | ||
8d53fa19 | 124 | static inline __pure struct mcs_spinlock *decode_tail(u32 tail) |
a33fda35 WL | 125 | {
126 | int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1; | |
127 | int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET; | |
128 | ||
0fa809ca WL | 129 | return per_cpu_ptr(&qnodes[idx].mcs, cpu);
130 | } | |
131 | ||
132 | static inline __pure | |
133 | struct mcs_spinlock *grab_mcs_node(struct mcs_spinlock *base, int idx) | |
134 | { | |
135 | return &((struct qnode *)base + idx)->mcs; | |
a33fda35 WL | 136 | }
137 | ||
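A self-contained round-trip of the tail encoding used by encode_tail()/decode_tail() above. The bit offsets are illustrative stand-ins for the qspinlock_types.h constants (which vary with NR_CPUS); only the scheme matters: a 2-bit nesting index plus the CPU number stored off-by-one so that 0 means "no tail".

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the qspinlock_types.h constants. */
#define TAIL_IDX_OFFSET	16
#define TAIL_IDX_BITS	2
#define TAIL_IDX_MASK	(((1u << TAIL_IDX_BITS) - 1) << TAIL_IDX_OFFSET)
#define TAIL_CPU_OFFSET	(TAIL_IDX_OFFSET + TAIL_IDX_BITS)

/* cpu is stored +1 so that tail == 0 means "no tail" (see comment above). */
static uint32_t encode_tail(int cpu, int idx)
{
	return ((uint32_t)(cpu + 1) << TAIL_CPU_OFFSET) |
	       ((uint32_t)idx << TAIL_IDX_OFFSET);
}

static void decode_tail(uint32_t tail, int *cpu, int *idx)
{
	*cpu = (int)(tail >> TAIL_CPU_OFFSET) - 1;
	*idx = (int)((tail & TAIL_IDX_MASK) >> TAIL_IDX_OFFSET);
}

int main(void)
{
	int cpu, idx;

	decode_tail(encode_tail(5, 2), &cpu, &idx);
	assert(cpu == 5 && idx == 2);

	/* cpu 0, idx 0 still yields a non-zero tail, distinct from "empty". */
	printf("tail(cpu=0, idx=0) = %#x\n", encode_tail(0, 0));
	return 0;
}
```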
c1fb159d PZI | 138 | #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
139 | ||
2c83e8e9 | 140 | #if _Q_PENDING_BITS == 8 |
59fb586b WD | 141 | /**
142 | * clear_pending - clear the pending bit. | |
143 | * @lock: Pointer to queued spinlock structure | |
144 | * | |
145 | * *,1,* -> *,0,* | |
146 | */ | |
147 | static __always_inline void clear_pending(struct qspinlock *lock) | |
148 | { | |
149 | WRITE_ONCE(lock->pending, 0); | |
150 | } | |
151 | ||
69f9cae9 PZI | 152 | /**
153 | * clear_pending_set_locked - take ownership and clear the pending bit. | |
154 | * @lock: Pointer to queued spinlock structure | |
155 | * | |
156 | * *,1,0 -> *,0,1 | |
157 | * | |
158 | * Lock stealing is not allowed if this function is used. | |
159 | */ | |
160 | static __always_inline void clear_pending_set_locked(struct qspinlock *lock) | |
161 | { | |
625e88be | 162 | WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL); |
69f9cae9 PZI | 163 | }
164 | ||
165 | /* | |
166 | * xchg_tail - Put in the new queue tail code word & retrieve previous one | |
167 | * @lock : Pointer to queued spinlock structure | |
168 | * @tail : The new queue tail code word | |
169 | * Return: The previous queue tail code word | |
170 | * | |
548095de | 171 | * xchg(lock, tail), which heads an address dependency |
69f9cae9 PZI | 172 | *
173 | * p,*,* -> n,*,* ; prev = xchg(lock, node) | |
174 | */ | |
175 | static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) | |
176 | { | |
64d816cb | 177 | /* |
9d4646d1 WD | 178 | * We can use relaxed semantics since the caller ensures that the
179 | * MCS node is properly initialized before updating the tail. | |
64d816cb | 180 | */ |
9d4646d1 | 181 | return (u32)xchg_relaxed(&lock->tail, |
64d816cb | 182 | tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET; |
69f9cae9 PZI | 183 | }
184 | ||
185 | #else /* _Q_PENDING_BITS == 8 */ | |
186 | ||
59fb586b WD | 187 | /**
188 | * clear_pending - clear the pending bit. | |
189 | * @lock: Pointer to queued spinlock structure | |
190 | * | |
191 | * *,1,* -> *,0,* | |
192 | */ | |
193 | static __always_inline void clear_pending(struct qspinlock *lock) | |
194 | { | |
195 | atomic_andnot(_Q_PENDING_VAL, &lock->val); | |
196 | } | |
197 | ||
6403bd7d WL | 198 | /**
199 | * clear_pending_set_locked - take ownership and clear the pending bit. | |
200 | * @lock: Pointer to queued spinlock structure | |
201 | * | |
202 | * *,1,0 -> *,0,1 | |
203 | */ | |
204 | static __always_inline void clear_pending_set_locked(struct qspinlock *lock) | |
205 | { | |
206 | atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val); | |
207 | } | |
208 | ||
209 | /** | |
210 | * xchg_tail - Put in the new queue tail code word & retrieve previous one | |
211 | * @lock : Pointer to queued spinlock structure | |
212 | * @tail : The new queue tail code word | |
213 | * Return: The previous queue tail code word | |
214 | * | |
215 | * xchg(lock, tail) | |
216 | * | |
217 | * p,*,* -> n,*,* ; prev = xchg(lock, node) | |
218 | */ | |
219 | static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) | |
220 | { | |
221 | u32 old, new, val = atomic_read(&lock->val); | |
222 | ||
223 | for (;;) { | |
224 | new = (val & _Q_LOCKED_PENDING_MASK) | tail; | |
64d816cb | 225 | /* |
9d4646d1 WD | 226 | * We can use relaxed semantics since the caller ensures that
227 | * the MCS node is properly initialized before updating the | |
228 | * tail. | |
64d816cb | 229 | */ |
9d4646d1 | 230 | old = atomic_cmpxchg_relaxed(&lock->val, val, new); |
6403bd7d WL | 231 | if (old == val)
232 | break; | |
233 | ||
234 | val = old; | |
235 | } | |
236 | return old; | |
237 | } | |
69f9cae9 | 238 | #endif /* _Q_PENDING_BITS == 8 */ |
6403bd7d | 239 | |
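For configurations that cannot exchange the tail as its own 16-bit field, the fallback xchg_tail() above loops on a compare-and-swap that replaces only the tail bits while preserving the locked and pending bits. A userspace C11 sketch of the same pattern, with illustrative constants:

```c
#include <stdatomic.h>
#include <stdint.h>

#define LOCKED_PENDING_MASK	0x0000ffffu	/* stand-in for _Q_LOCKED_PENDING_MASK */

/* Swap in a new tail, keeping the low locked+pending bits untouched. */
static uint32_t xchg_tail_sketch(_Atomic uint32_t *lockword, uint32_t tail)
{
	uint32_t old = atomic_load_explicit(lockword, memory_order_relaxed);
	uint32_t new;

	do {
		new = (old & LOCKED_PENDING_MASK) | tail;
		/*
		 * Relaxed ordering suffices for the reason the kernel comment
		 * gives: the caller orders the node initialisation before
		 * publishing the tail.
		 */
	} while (!atomic_compare_exchange_weak_explicit(lockword, &old, new,
							memory_order_relaxed,
							memory_order_relaxed));

	return old;	/* previous value; callers mask out the tail bits */
}
```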
7aa54be2 PZ | 240 | /**
241 | * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending | |
242 | * @lock : Pointer to queued spinlock structure | |
243 | * Return: The previous lock value | |
244 | * | |
245 | * *,*,* -> *,1,* | |
246 | */ | |
247 | #ifndef queued_fetch_set_pending_acquire | |
248 | static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock) | |
249 | { | |
250 | return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val); | |
251 | } | |
252 | #endif | |
253 | ||
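In the generic case above, queued_fetch_set_pending_acquire() is just an acquire-ordered fetch-or; a minimal C11 equivalent (names and constant are illustrative):

```c
#include <stdatomic.h>
#include <stdint.h>

#define PENDING_VAL	0x100u	/* stand-in for _Q_PENDING_VAL */

/* Set the pending bit and return the value that was seen beforehand. */
static uint32_t fetch_set_pending_acquire(_Atomic uint32_t *lockword)
{
	return atomic_fetch_or_explicit(lockword, PENDING_VAL,
					memory_order_acquire);
}
```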
2c83e8e9 WL | 254 | /**
255 | * set_locked - Set the lock bit and own the lock | |
256 | * @lock: Pointer to queued spinlock structure | |
257 | * | |
258 | * *,*,0 -> *,0,1 | |
259 | */ | |
260 | static __always_inline void set_locked(struct qspinlock *lock) | |
261 | { | |
625e88be | 262 | WRITE_ONCE(lock->locked, _Q_LOCKED_VAL); |
2c83e8e9 WL | 263 | }
264 | ||
a23db284 WL | 265 |
266 | /* | |
267 | * Generate the native code for queued_spin_lock_slowpath(); provide NOPs for
268 | * all the PV callbacks. | |
269 | */ | |
270 | ||
271 | static __always_inline void __pv_init_node(struct mcs_spinlock *node) { } | |
cd0272fa WL | 272 | static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
273 | struct mcs_spinlock *prev) { } | |
75d22702 WL | 274 | static __always_inline void __pv_kick_node(struct qspinlock *lock,
275 | struct mcs_spinlock *node) { } | |
1c4941fd WL | 276 | static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
277 | struct mcs_spinlock *node) | |
278 | { return 0; } | |
a23db284 WL | 279 |
280 | #define pv_enabled() false | |
281 | ||
282 | #define pv_init_node __pv_init_node | |
283 | #define pv_wait_node __pv_wait_node | |
284 | #define pv_kick_node __pv_kick_node | |
1c4941fd | 285 | #define pv_wait_head_or_lock __pv_wait_head_or_lock |
a23db284 WL | 286 |
287 | #ifdef CONFIG_PARAVIRT_SPINLOCKS | |
288 | #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath | |
289 | #endif | |
290 | ||
291 | #endif /* _GEN_PV_LOCK_SLOWPATH */ | |
292 | ||
a33fda35 WL | 293 | /**
294 | * queued_spin_lock_slowpath - acquire the queued spinlock | |
295 | * @lock: Pointer to queued spinlock structure | |
296 | * @val: Current value of the queued spinlock 32-bit word | |
297 | * | |
c1fb159d | 298 | * (queue tail, pending bit, lock value) |
a33fda35 | 299 | * |
c1fb159d PZI | 300 | * fast : slow : unlock
301 | * : : | |
302 | * uncontended (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0) | |
303 | * : | ^--------.------. / : | |
304 | * : v \ \ | : | |
305 | * pending : (0,1,1) +--> (0,1,0) \ | : | |
306 | * : | ^--' | | : | |
307 | * : v | | : | |
308 | * uncontended : (n,x,y) +--> (n,0,0) --' | : | |
309 | * queue : | ^--' | : | |
310 | * : v | : | |
311 | * contended : (*,x,y) +--> (*,0,0) ---> (*,0,1) -' : | |
312 | * queue : ^--' : | |
a33fda35 WL | 313 | */
314 | void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) | |
315 | { | |
316 | struct mcs_spinlock *prev, *next, *node; | |
59fb586b | 317 | u32 old, tail; |
a33fda35 WL | 318 | int idx;
319 | ||
320 | BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)); | |
321 | ||
a23db284 | 322 | if (pv_enabled()) |
81d3dc9a | 323 | goto pv_queue; |
a23db284 | 324 | |
43b3f028 | 325 | if (virt_spin_lock(lock)) |
2aa79af6 PZI | 326 | return;
327 | ||
c1fb159d | 328 | /* |
6512276d WD | 329 | * Wait for in-progress pending->locked hand-overs with a bounded
330 | * number of spins so that we guarantee forward progress. | |
c1fb159d PZI | 331 | *
332 | * 0,1,0 -> 0,0,1 | |
333 | */ | |
334 | if (val == _Q_PENDING_VAL) { | |
6512276d WD | 335 | int cnt = _Q_PENDING_LOOPS;
336 | val = atomic_cond_read_relaxed(&lock->val, | |
337 | (VAL != _Q_PENDING_VAL) || !cnt--); | |
c1fb159d PZI | 338 | }
339 | ||
59fb586b WD | 340 | /*
341 | * If we observe any contention; queue. | |
342 | */ | |
343 | if (val & ~_Q_LOCKED_MASK) | |
344 | goto queue; | |
345 | ||
c1fb159d PZI | 346 | /*
347 | * trylock || pending | |
348 | * | |
756b1df4 | 349 | * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock |
c1fb159d | 350 | */ |
7aa54be2 | 351 | val = queued_fetch_set_pending_acquire(lock); |
756b1df4 | 352 | |
53bf57fa | 353 | /* |
756b1df4 PZ | 354 | * If we observe contention, there is a concurrent locker.
355 | * | |
356 | * Undo and queue; our setting of PENDING might have made the | |
357 | * n,0,0 -> 0,0,0 transition fail and it will now be waiting | |
358 | * on @next to become !NULL. | |
53bf57fa PZ | 359 | */
360 | if (unlikely(val & ~_Q_LOCKED_MASK)) { | |
756b1df4 PZ | 361 |
362 | /* Undo PENDING if we set it. */ | |
53bf57fa PZ | 363 | if (!(val & _Q_PENDING_MASK))
364 | clear_pending(lock); | |
756b1df4 | 365 | |
53bf57fa | 366 | goto queue; |
59fb586b | 367 | } |
c1fb159d PZI | 368 |
369 | /* | |
53bf57fa PZ | 370 | * We're pending, wait for the owner to go away.
371 | * | |
372 | * 0,1,1 -> 0,1,0 | |
373 | * | |
374 | * this wait loop must be a load-acquire such that we match the | |
375 | * store-release that clears the locked bit and create lock | |
376 | * sequentiality; this is because not all | |
377 | * clear_pending_set_locked() implementations imply full | |
378 | * barriers. | |
379 | */ | |
380 | if (val & _Q_LOCKED_MASK) | |
381 | atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK)); | |
382 | ||
383 | /* | |
384 | * take ownership and clear the pending bit. | |
385 | * | |
386 | * 0,1,0 -> 0,0,1 | |
c1fb159d | 387 | */ |
53bf57fa | 388 | clear_pending_set_locked(lock); |
ad53fa10 | 389 | lockevent_inc(lock_pending); |
53bf57fa | 390 | return; |
c1fb159d PZI | 391 |
392 | /* | |
393 | * End of pending bit optimistic spinning and beginning of MCS | |
394 | * queuing. | |
395 | */ | |
396 | queue: | |
ad53fa10 | 397 | lockevent_inc(lock_slowpath); |
81d3dc9a | 398 | pv_queue: |
0fa809ca | 399 | node = this_cpu_ptr(&qnodes[0].mcs); |
a33fda35 WL | 400 | idx = node->count++;
401 | tail = encode_tail(smp_processor_id(), idx); | |
402 | ||
d682b596 WL | 403 | /*
404 | * 4 nodes are allocated based on the assumption that there will | |
405 | * not be nested NMIs taking spinlocks. That may not be true in | |
406 | * some architectures even though the chance of needing more than | |
407 | * 4 nodes will still be extremely unlikely. When that happens, | |
408 | * we fall back to spinning on the lock directly without using | |
409 | * any MCS node. This is not the most elegant solution, but is | |
410 | * simple enough. | |
411 | */ | |
412 | if (unlikely(idx >= MAX_NODES)) { | |
ad53fa10 | 413 | lockevent_inc(lock_no_node); |
d682b596 WL | 414 | while (!queued_spin_trylock(lock))
415 | cpu_relax(); | |
416 | goto release; | |
417 | } | |
418 | ||
0fa809ca | 419 | node = grab_mcs_node(node, idx); |
11dc1322 | 420 | |
1222109a WL | 421 | /*
422 | * Keep counts of non-zero index values: | |
423 | */ | |
ad53fa10 | 424 | lockevent_cond_inc(lock_use_node2 + idx - 1, idx); |
1222109a | 425 | |
11dc1322 WD | 426 | /*
427 | * Ensure that we increment the head node->count before initialising | |
428 | * the actual node. If the compiler is kind enough to reorder these | |
429 | * stores, then an IRQ could overwrite our assignments. | |
430 | */ | |
431 | barrier(); | |
432 | ||
a33fda35 WL | 433 | node->locked = 0;
434 | node->next = NULL; | |
a23db284 | 435 | pv_init_node(node); |
a33fda35 WL | 436 |
437 | /* | |
6403bd7d WL | 438 | * We touched a (possibly) cold cacheline in the per-cpu queue node;
439 | * attempt the trylock once more in the hope someone let go while we | |
440 | * weren't watching. | |
a33fda35 | 441 | */ |
6403bd7d WL | 442 | if (queued_spin_trylock(lock))
443 | goto release; | |
a33fda35 WL | 444 |
445 | /* | |
9d4646d1 WD | 446 | * Ensure that the initialisation of @node is complete before we
447 | * publish the updated tail via xchg_tail() and potentially link | |
448 | * @node into the waitqueue via WRITE_ONCE(prev->next, node) below. | |
449 | */ | |
450 | smp_wmb(); | |
451 | ||
452 | /* | |
453 | * Publish the updated tail. | |
6403bd7d WL | 454 | * We have already touched the queueing cacheline; don't bother with
455 | * pending stuff. | |
456 | * | |
457 | * p,*,* -> n,*,* | |
a33fda35 | 458 | */ |
6403bd7d | 459 | old = xchg_tail(lock, tail); |
aa68744f | 460 | next = NULL; |
a33fda35 WL | 461 |
462 | /* | |
463 | * if there was a previous node; link it and wait until reaching the | |
464 | * head of the waitqueue. | |
465 | */ | |
6403bd7d | 466 | if (old & _Q_TAIL_MASK) { |
a33fda35 | 467 | prev = decode_tail(old); |
95bcade3 | 468 | |
9d4646d1 WD | 469 | /* Link @node into the waitqueue. */
470 | WRITE_ONCE(prev->next, node); | |
a33fda35 | 471 | |
cd0272fa | 472 | pv_wait_node(node, prev); |
a33fda35 | 473 | arch_mcs_spin_lock_contended(&node->locked); |
81b55986 WL | 474 |
475 | /* | |
476 | * While waiting for the MCS lock, the next pointer may have | |
477 | * been set by another lock waiter. We optimistically load | |
478 | * the next pointer & prefetch the cacheline for writing | |
479 | * to reduce latency in the upcoming MCS unlock operation. | |
480 | */ | |
481 | next = READ_ONCE(node->next); | |
482 | if (next) | |
483 | prefetchw(next); | |
a33fda35 WL | 484 | }
485 | ||
486 | /* | |
c1fb159d PZI | 487 | * we're at the head of the waitqueue, wait for the owner & pending to
488 | * go away. | |
a33fda35 | 489 | * |
c1fb159d | 490 | * *,x,y -> *,0,0 |
2c83e8e9 WL | 491 | *
492 | * this wait loop must use a load-acquire such that we match the | |
493 | * store-release that clears the locked bit and create lock | |
494 | * sequentiality; this is because the set_locked() function below | |
495 | * does not imply a full barrier. | |
496 | * | |
1c4941fd WL | 497 | * The PV pv_wait_head_or_lock function, if active, will acquire
498 | * the lock and return a non-zero value. So we have to skip the | |
f9c811fa WD | 499 | * atomic_cond_read_acquire() call. As the next PV queue head hasn't
500 | * been designated yet, there is no way for the locked value to become | |
1c4941fd WL | 501 | * _Q_SLOW_VAL. So both the set_locked() and the
502 | * atomic_cmpxchg_relaxed() calls will be safe. | |
503 | * | |
504 | * If PV isn't active, 0 will be returned instead. | |
505 | * | |
a33fda35 | 506 | */ |
1c4941fd WL | 507 | if ((val = pv_wait_head_or_lock(lock, node)))
508 | goto locked; | |
509 | ||
f9c811fa | 510 | val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK)); |
a33fda35 | 511 | |
1c4941fd | 512 | locked: |
a33fda35 WL | 513 | /*
514 | * claim the lock: | |
515 | * | |
c1fb159d | 516 | * n,0,0 -> 0,0,1 : lock, uncontended |
59fb586b | 517 | * *,*,0 -> *,*,1 : lock, contended |
2c83e8e9 | 518 | * |
59fb586b WD | 519 | * If the queue head is the only one in the queue (lock value == tail)
520 | * and nobody is pending, clear the tail code and grab the lock. | |
521 | * Otherwise, we only need to grab the lock. | |
a33fda35 | 522 | */ |
c61da58d | 523 | |
ae75d908 | 524 | /* |
756b1df4 PZ | 525 | * In the PV case we might already have _Q_LOCKED_VAL set, because
526 | * of lock stealing; therefore we must also allow: | |
ae75d908 | 527 | * |
756b1df4 PZ | 528 | * n,0,1 -> 0,0,1
529 | * | |
530 | * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the | |
531 | * above wait condition, therefore any concurrent setting of | |
532 | * PENDING will make the uncontended transition fail. | |
ae75d908 | 533 | */ |
756b1df4 PZ | 534 | if ((val & _Q_TAIL_MASK) == tail) {
535 | if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL)) | |
536 | goto release; /* No contention */ | |
537 | } | |
a33fda35 | 538 | |
756b1df4 PZ | 539 | /*
540 | * Either somebody is queued behind us or _Q_PENDING_VAL got set | |
541 | * which will then detect the remaining tail and queue behind us | |
542 | * ensuring we'll see a @next. | |
543 | */ | |
c61da58d WD | 544 | set_locked(lock);
545 | ||
a33fda35 | 546 | /* |
aa68744f | 547 | * contended path; wait for next if not observed yet, release. |
a33fda35 | 548 | */ |
c131a198 WD | 549 | if (!next)
550 | next = smp_cond_load_relaxed(&node->next, (VAL)); | |
a33fda35 | 551 | |
2c83e8e9 | 552 | arch_mcs_spin_unlock_contended(&next->locked); |
75d22702 | 553 | pv_kick_node(lock, next); |
a33fda35 WL | 554 |
555 | release: | |
556 | /* | |
557 | * release the node | |
558 | */ | |
0fa809ca | 559 | __this_cpu_dec(qnodes[0].mcs.count); |
a33fda35 WL | 560 | }
561 | EXPORT_SYMBOL(queued_spin_lock_slowpath); | |
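To summarise the pre-queueing portion of the slowpath just shown, here is a compressed userspace sketch of the trylock-then-pending sequence, with states written as (tail,pending,locked) as in the diagram above. It is illustrative only: the constants are placeholders and the MCS queue, PV hooks and bounded pending wait are all omitted.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define LOCKED_VAL	0x001u	/* stand-ins for the _Q_* constants */
#define PENDING_VAL	0x100u
#define LOCKED_MASK	0x0ffu

/* 0,0,0 -> 0,0,1 : uncontended trylock */
static bool trylock_sketch(_Atomic uint32_t *lockword)
{
	uint32_t expected = 0;

	return atomic_compare_exchange_strong_explicit(lockword, &expected,
						       LOCKED_VAL,
						       memory_order_acquire,
						       memory_order_relaxed);
}

/* 0,0,1 -> 0,1,1 -> (owner releases) -> 0,1,0 -> 0,0,1 : pending spinner */
static bool lock_via_pending_sketch(_Atomic uint32_t *lockword)
{
	uint32_t val = atomic_fetch_or_explicit(lockword, PENDING_VAL,
						memory_order_acquire);

	if (val & ~LOCKED_MASK) {
		/* Pending or tail already set: undo our pending bit and queue. */
		if (!(val & PENDING_VAL))
			atomic_fetch_and_explicit(lockword, ~PENDING_VAL,
						  memory_order_relaxed);
		return false;
	}

	/* Wait for the current owner to drop the locked byte. */
	while (atomic_load_explicit(lockword, memory_order_acquire) & LOCKED_MASK)
		;

	/* 0,1,0 -> 0,0,1 : take ownership and clear pending in one step. */
	atomic_fetch_add_explicit(lockword, LOCKED_VAL - PENDING_VAL,
				  memory_order_relaxed);
	return true;
}
```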
a23db284 WL | 562 |
563 | /* | |
564 | * Generate the paravirt code for queued_spin_unlock_slowpath(). | |
565 | */ | |
566 | #if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS) | |
567 | #define _GEN_PV_LOCK_SLOWPATH | |
568 | ||
569 | #undef pv_enabled | |
570 | #define pv_enabled() true | |
571 | ||
572 | #undef pv_init_node | |
573 | #undef pv_wait_node | |
574 | #undef pv_kick_node | |
1c4941fd | 575 | #undef pv_wait_head_or_lock |
a23db284 WL | 576 |
577 | #undef queued_spin_lock_slowpath | |
578 | #define queued_spin_lock_slowpath __pv_queued_spin_lock_slowpath | |
579 | ||
580 | #include "qspinlock_paravirt.h" | |
581 | #include "qspinlock.c" | |
582 | ||
583 | #endif |
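The _GEN_PV_LOCK_SLOWPATH trick above, where the file includes itself a second time with the pv_*() hooks rebound, boils down to the following self-including demo. The file name and function names are hypothetical; only the preprocessor pattern mirrors what this file does.

```c
/* qspinlock_gen_demo.c - hypothetical file name; compile this file alone. */
#ifndef GEN_SECOND_PASS

#include <stdio.h>

/* First pass: the "native" build, PV hooks compile to nothing. */
#define lock_slowpath	native_lock_slowpath
#define pv_hook()	do { } while (0)

#endif /* GEN_SECOND_PASS */

/* One shared body, compiled twice with different macro bindings. */
void lock_slowpath(void)
{
	pv_hook();
	puts("common slowpath body");
}

#ifndef GEN_SECOND_PASS
#define GEN_SECOND_PASS

/* Second pass: rebind the hooks and re-include this very file. */
#undef lock_slowpath
#undef pv_hook
#define lock_slowpath	pv_lock_slowpath
#define pv_hook()	puts("paravirt hook runs first")

#include "qspinlock_gen_demo.c"

int main(void)
{
	native_lock_slowpath();
	pv_lock_slowpath();
	return 0;
}
#endif /* GEN_SECOND_PASS */
```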