/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GEN_PV_LOCK_SLOWPATH
#error "do not include this file"
#endif

#include <linux/hash.h>
#include <linux/memblock.h>
#include <linux/debug_locks.h>

/*
 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
 * of spinning them.
 *
 * This relies on the architecture to provide two paravirt hypercalls:
 *
 *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
 *   pv_kick(cpu)             -- wakes a suspended vcpu
 *
 * Using these we implement __pv_queued_spin_lock_slowpath() and
 * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
 * native_queued_spin_unlock().
 */
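
/*
 * A minimal sketch, assuming a hypothetical arch_halt() primitive, of how a
 * backend might implement the two hypercalls; re-checking *ptr with
 * interrupts disabled is what keeps a concurrent pv_kick() from being lost:
 *
 *	static void example_pv_wait(u8 *ptr, u8 val)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		if (READ_ONCE(*ptr) == val)
 *			arch_halt();	// resumes on a kick or interrupt
 *		local_irq_restore(flags);
 *	}
 */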

#define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)

/*
 * Queue Node Adaptive Spinning
 *
 * A queue node vCPU will stop spinning if the vCPU in the previous node is
 * not running. The one lock stealing attempt allowed at slowpath entry
 * mitigates the slight slowdown for non-overcommitted guests with this
 * aggressive wait-early mechanism.
 *
 * The status of the previous node will be checked at a fixed interval
 * controlled by PV_PREV_CHECK_MASK. This is to ensure that we won't
 * pound on the cacheline of the previous node too heavily.
 */
#define PV_PREV_CHECK_MASK	0xff

/*
 * Queue node uses: VCPU_RUNNING & VCPU_HALTED.
 * Queue head uses: VCPU_RUNNING & VCPU_HASHED.
 */
enum vcpu_state {
	VCPU_RUNNING = 0,
	VCPU_HALTED,		/* Used only in pv_wait_node */
	VCPU_HASHED,		/* = pv_hash'ed + VCPU_HALTED */
};

struct pv_node {
	struct mcs_spinlock	mcs;
	int			cpu;
	u8			state;
};

/*
 * Hybrid PV queued/unfair lock
 *
 * By replacing the regular queued_spin_trylock() with the function below,
 * it will be called once when a lock waiter enters the PV slowpath before
 * being queued.
 *
 * The pending bit is set by the queue head vCPU of the MCS wait queue in
 * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
 * When that bit becomes visible to the incoming waiters, no lock stealing
 * is allowed. The function will return immediately to make the waiters
 * enter the MCS wait queue. So lock starvation shouldn't happen as long
 * as the queued mode vCPUs are actively running to set the pending bit
 * and hence disabling lock stealing.
 *
 * When the pending bit isn't set, the lock waiters will stay in the unfair
 * mode spinning on the lock unless the MCS wait queue is empty. In this
 * case, the lock waiters will enter the queued mode slowpath trying to
 * become the queue head and set the pending bit.
 *
 * This hybrid PV queued/unfair lock combines the best attributes of a
 * queued lock (no lock starvation) and an unfair lock (good performance
 * on not heavily contended locks).
 */
#define queued_spin_trylock(l)	pv_hybrid_queued_unfair_trylock(l)
static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
	/*
	 * Stay in unfair lock mode as long as queued mode waiters are
	 * present in the MCS wait queue but the pending bit isn't set.
	 */
	for (;;) {
		int val = atomic_read(&lock->val);
		u8 old = 0;

		if (!(val & _Q_LOCKED_PENDING_MASK) &&
		    try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL)) {
			lockevent_inc(pv_lock_stealing);
			return true;
		}
		if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
			break;

		cpu_relax();
	}

	return false;
}
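
/*
 * Note the three possible outcomes above: steal the lock when it is free
 * and stealing is still allowed (return true), keep spinning while queued
 * waiters exist but the pending bit is clear, or fall through to the MCS
 * wait queue once the queue is empty or pending is set (return false).
 */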

/*
 * The pending bit is used by the queue head vCPU to indicate that it
 * is actively spinning on the lock and no lock stealing is allowed.
 */
#if _Q_PENDING_BITS == 8
static __always_inline void set_pending(struct qspinlock *lock)
{
	WRITE_ONCE(lock->pending, 1);
}

/*
 * The pending bit check in pv_hybrid_queued_unfair_trylock() isn't a memory
 * barrier. Therefore, an atomic cmpxchg_acquire() is used to acquire the
 * lock to make sure that the queue head really gets it.
 */
static __always_inline bool trylock_clear_pending(struct qspinlock *lock)
{
	u16 old = _Q_PENDING_VAL;

	return !READ_ONCE(lock->locked) &&
	       try_cmpxchg_acquire(&lock->locked_pending, &old, _Q_LOCKED_VAL);
}
#else /* _Q_PENDING_BITS == 8 */
static __always_inline void set_pending(struct qspinlock *lock)
{
	atomic_or(_Q_PENDING_VAL, &lock->val);
}

static __always_inline bool trylock_clear_pending(struct qspinlock *lock)
{
	int old, new;

	old = atomic_read(&lock->val);
	do {
		if (old & _Q_LOCKED_MASK)
			return false;
		/*
		 * Try to clear pending bit & set locked bit
		 */
		new = (old & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
	} while (!atomic_try_cmpxchg_acquire(&lock->val, &old, new));

	return true;
}
#endif /* _Q_PENDING_BITS == 8 */
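
/*
 * In the _Q_PENDING_BITS == 8 layout, lock->locked_pending overlays the
 * locked and pending bytes, so a single 16-bit cmpxchg from _Q_PENDING_VAL
 * to _Q_LOCKED_VAL clears pending and sets locked in one atomic step. The
 * generic variant above performs the same (pending, locked): (1, 0) ->
 * (0, 1) transition with a cmpxchg loop on the whole lock word.
 */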

/*
 * Lock and MCS node addresses hash table for fast lookup
 *
 * Hashing is done on a per-cacheline basis to minimize the need to access
 * more than one cacheline.
 *
 * Dynamically allocate a hash table big enough to hold at least 4X the
 * number of possible cpus in the system. Allocation is done on page
 * granularity. So the minimum number of hash buckets should be at least
 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
 *
 * Since we should not be holding locks from NMI context (very rare indeed)
 * the max load factor is 0.75, which is around the point where open
 * addressing breaks down.
 */
struct pv_hash_entry {
	struct qspinlock *lock;
	struct pv_node   *node;
};

#define PV_HE_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN	(PAGE_SIZE / sizeof(struct pv_hash_entry))
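
/*
 * For example: on a 64-bit kernel both pointers are 8 bytes, so each
 * pv_hash_entry is 16 bytes; with typical 64-byte cachelines PV_HE_PER_LINE
 * is 4, and with 4k pages PV_HE_MIN is 256, matching the sizing note above.
 */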

static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits __read_mostly;

/*
 * Allocate memory for the PV qspinlock hash buckets
 *
 * This function should be called from the paravirt spinlock initialization
 * routine.
 */
void __init __pv_init_lock_hash(void)
{
	int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);

	if (pv_hash_size < PV_HE_MIN)
		pv_hash_size = PV_HE_MIN;

	/*
	 * Allocate space from bootmem which should be page-size aligned
	 * and hence cacheline aligned.
	 */
	pv_lock_hash = alloc_large_system_hash("PV qspinlock",
					       sizeof(struct pv_hash_entry),
					       pv_hash_size, 0,
					       HASH_EARLY | HASH_ZERO,
					       &pv_lock_hash_bits, NULL,
					       pv_hash_size, pv_hash_size);
}
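
/*
 * Concretely: with 64 possible CPUs, pv_hash_size starts at 4 * 64 = 256
 * entries, which already meets PV_HE_MIN on a 64-bit kernel and occupies
 * exactly one 4k page (256 * 16 bytes).
 */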

#define for_each_hash_entry(he, offset, hash)						\
	for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;	\
	     offset < (1 << pv_lock_hash_bits);						\
	     offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
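
/*
 * The hash is first rounded down to a cacheline boundary (hash &=
 * ~(PV_HE_PER_LINE - 1)), so a probe scans all entries of one cacheline
 * before walking on linearly, wrapping modulo the table size.
 */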

static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	int hopcnt = 0;

	for_each_hash_entry(he, offset, hash) {
		struct qspinlock *old = NULL;
		hopcnt++;
		if (try_cmpxchg(&he->lock, &old, lock)) {
			WRITE_ONCE(he->node, node);
			lockevent_pv_hop(hopcnt);
			return &he->lock;
		}
	}
	/*
	 * Hard assume there is a free entry for us.
	 *
	 * This is guaranteed by ensuring every blocked lock only ever consumes
	 * a single entry, and since we only have 4 nesting levels per CPU
	 * and allocated 4*nr_possible_cpus(), this must be so.
	 *
	 * The single entry is guaranteed by having the lock owner unhash
	 * before it releases.
	 */
	BUG();
}

static struct pv_node *pv_unhash(struct qspinlock *lock)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	struct pv_node *node;

	for_each_hash_entry(he, offset, hash) {
		if (READ_ONCE(he->lock) == lock) {
			node = READ_ONCE(he->node);
			WRITE_ONCE(he->lock, NULL);
			return node;
		}
	}
	/*
	 * Hard assume we'll find an entry.
	 *
	 * This guarantees a limited lookup time and is itself guaranteed by
	 * having the lock owner do the unhash -- IFF the unlock sees the
	 * SLOW flag, there MUST be a hash entry.
	 */
	BUG();
}

/*
 * Return true when it is time to check the previous node which is not
 * in a running state.
 */
static inline bool
pv_wait_early(struct pv_node *prev, int loop)
{
	if ((loop & PV_PREV_CHECK_MASK) != 0)
		return false;

	return READ_ONCE(prev->state) != VCPU_RUNNING;
}
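
/*
 * Since the spin loop in pv_wait_node() counts loop down from
 * SPIN_THRESHOLD, prev->state is only sampled when the low byte of loop is
 * zero, i.e. once every PV_PREV_CHECK_MASK + 1 = 256 iterations, keeping
 * the traffic on the previous node's cacheline low.
 */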

/*
 * Initialize the PV part of the mcs_spinlock node.
 */
static void pv_init_node(struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode));

	pn->cpu = smp_processor_id();
	pn->state = VCPU_RUNNING;
}

/*
 * Wait for node->locked to become true, halt the vcpu after a short spin.
 * pv_kick_node() is used to set _Q_SLOW_VAL and fill in the hash table on
 * its behalf.
 */
static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct pv_node *pp = (struct pv_node *)prev;
	bool wait_early;
	int loop;

	for (;;) {
		for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
			if (READ_ONCE(node->locked))
				return;
			if (pv_wait_early(pp, loop)) {
				wait_early = true;
				break;
			}
			cpu_relax();
		}

		/*
		 * Order pn->state vs pn->locked thusly:
		 *
		 * [S] pn->state = VCPU_HALTED    [S] next->locked = 1
		 *     MB                             MB
		 * [L] pn->locked                 [RmW] pn->state = VCPU_HASHED
		 *
		 * Matches the cmpxchg() from pv_kick_node().
		 */
		smp_store_mb(pn->state, VCPU_HALTED);

		if (!READ_ONCE(node->locked)) {
			lockevent_inc(pv_wait_node);
			lockevent_cond_inc(pv_wait_early, wait_early);
			pv_wait(&pn->state, VCPU_HALTED);
		}

		/*
		 * If pv_kick_node() changed us to VCPU_HASHED, retain that
		 * value so that pv_wait_head_or_lock() knows to not also try
		 * to hash this lock.
		 */
		cmpxchg(&pn->state, VCPU_HALTED, VCPU_RUNNING);

		/*
		 * If the locked flag is still not set after wakeup, it is a
		 * spurious wakeup and the vCPU should wait again. However,
		 * there is a pretty high overhead for CPU halting and kicking.
		 * So it is better to spin for a while in the hope that the
		 * MCS lock will be released soon.
		 */
		lockevent_cond_inc(pv_spurious_wakeup,
				   !READ_ONCE(node->locked));
	}

	/*
	 * By now our node->locked should be 1 and our caller will not actually
	 * spin-wait for it. We do however rely on our caller to do a
	 * load-acquire for us.
	 */
}

/*
 * Called after setting next->locked = 1 when we're the lock owner.
 *
 * Instead of waking the waiters stuck in pv_wait_node(), advance their state
 * such that they're waiting in pv_wait_head_or_lock(); this avoids a
 * wake/sleep cycle.
 */
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	u8 old = VCPU_HALTED;
	/*
	 * If the vCPU is indeed halted, advance its state to match that of
	 * pv_wait_node(). If OTOH this fails, the vCPU was running and will
	 * observe its next->locked value and advance itself.
	 *
	 * Matches with smp_store_mb() and cmpxchg() in pv_wait_node().
	 *
	 * The write to next->locked in arch_mcs_spin_unlock_contended()
	 * must be ordered before the read of pn->state in the cmpxchg()
	 * below for the code to work correctly. To guarantee full ordering
	 * irrespective of the success or failure of the cmpxchg(),
	 * a relaxed version with explicit barrier is used. The control
	 * dependency will order the reading of pn->state before any
	 * subsequent writes.
	 */
	smp_mb__before_atomic();
	if (!try_cmpxchg_relaxed(&pn->state, &old, VCPU_HASHED))
		return;

	/*
	 * Put the lock into the hash table and set the _Q_SLOW_VAL.
	 *
	 * As this is the same vCPU that will check the _Q_SLOW_VAL value and
	 * the hash table later on at unlock time, no atomic instruction is
	 * needed.
	 */
	WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
	(void)pv_hash(lock, pn);
}

/*
 * Wait for l->locked to become clear and acquire the lock;
 * halt the vcpu after a short spin.
 * __pv_queued_spin_unlock() will wake us.
 *
 * The current value of the lock will be returned for additional processing.
 */
static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct qspinlock **lp = NULL;
	int waitcnt = 0;
	int loop;

	/*
	 * If pv_kick_node() already advanced our state, we don't need to
	 * insert ourselves into the hash table anymore.
	 */
	if (READ_ONCE(pn->state) == VCPU_HASHED)
		lp = (struct qspinlock **)1;

	/*
	 * Tracking # of slowpath locking operations
	 */
	lockevent_inc(lock_slowpath);

	for (;; waitcnt++) {
		/*
		 * Set correct vCPU state to be used by queue node wait-early
		 * mechanism.
		 */
		WRITE_ONCE(pn->state, VCPU_RUNNING);

		/*
		 * Set the pending bit in the active lock spinning loop to
		 * disable lock stealing before attempting to acquire the lock.
		 */
		set_pending(lock);
		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (trylock_clear_pending(lock))
				goto gotlock;
			cpu_relax();
		}
		clear_pending(lock);

		if (!lp) { /* ONCE */
			lp = pv_hash(lock, pn);

			/*
			 * We must hash before setting _Q_SLOW_VAL, such that
			 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
			 * we'll be sure to be able to observe our hash entry.
			 *
			 * [S] <hash>                      [Rmw] l->locked == _Q_SLOW_VAL
			 *     MB                                RMB
			 * [RmW] l->locked = _Q_SLOW_VAL   [L] <unhash>
			 *
			 * Matches the smp_rmb() in __pv_queued_spin_unlock_slowpath().
			 */
			if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
				/*
				 * The lock was free and now we own the lock.
				 * Change the lock value back to _Q_LOCKED_VAL
				 * and remove the hash table entry.
				 */
				WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
				WRITE_ONCE(*lp, NULL);
				goto gotlock;
			}
		}
		WRITE_ONCE(pn->state, VCPU_HASHED);
		lockevent_inc(pv_wait_head);
		lockevent_cond_inc(pv_wait_again, waitcnt);
		pv_wait(&lock->locked, _Q_SLOW_VAL);

		/*
		 * Because of lock stealing, the queue head vCPU may not be
		 * able to acquire the lock before it has to wait again.
		 */
	}

	/*
	 * The cmpxchg() or xchg() call before coming here provides the
	 * acquire semantics for locking. The dummy ORing of _Q_LOCKED_VAL
	 * here is to indicate to the compiler that the value will always
	 * be nonzero to enable better code optimization.
	 */
gotlock:
	return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}
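
/*
 * Summary of the queue head's lock-word handshake above:
 *
 *   set_pending()               - close the door on lock stealing
 *   trylock_clear_pending()     - (pending, locked): (1, 0) -> (0, 1) on success
 *   clear_pending()             - reopen lock stealing before going to sleep
 *   xchg(->locked, _Q_SLOW_VAL) - tell the unlocker it must unhash and kick
 */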

/*
 * Include the architecture specific callee-save thunk of
 * __pv_queued_spin_unlock(). This thunk is put together with
 * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
 * function close to each other sharing consecutive instruction cachelines.
 * Alternatively, an architecture specific version of __pv_queued_spin_unlock()
 * can be defined.
 */
#include <asm/qspinlock_paravirt.h>

/*
 * PV versions of the unlock fastpath and slowpath functions to be used
 * instead of queued_spin_unlock().
 */
__visible __lockfunc void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
	struct pv_node *node;

	if (unlikely(locked != _Q_SLOW_VAL)) {
		WARN(!debug_locks_silent,
		     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
		     (unsigned long)lock, atomic_read(&lock->val));
		return;
	}

	/*
	 * A failed cmpxchg doesn't provide any memory-ordering guarantees,
	 * so we need a barrier to order the read of the node data in
	 * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
	 *
	 * Matches the xchg() in pv_wait_head_or_lock() setting _Q_SLOW_VAL.
	 */
	smp_rmb();

	/*
	 * Since the above failed to release, this must be the SLOW path.
	 * Therefore start by looking up the blocked node and unhashing it.
	 */
	node = pv_unhash(lock);

	/*
	 * Now that we have a reference to the (likely) blocked pv_node,
	 * release the lock.
	 */
	smp_store_release(&lock->locked, 0);

	/*
	 * At this point the memory pointed at by lock can be freed/reused,
	 * however we can still use the pv_node to kick the CPU.
	 * The other vCPU may not really be halted, but kicking an active
	 * vCPU is harmless other than the additional latency in completing
	 * the unlock.
	 */
	lockevent_inc(pv_kick_unlock);
	pv_kick(node->cpu);
}

#ifndef __pv_queued_spin_unlock
__visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	u8 locked = _Q_LOCKED_VAL;

	/*
	 * We must not unlock if SLOW, because in that case we must first
	 * unhash. Otherwise it would be possible to have multiple @lock
	 * entries, which would be BAD.
	 */
	if (try_cmpxchg_release(&lock->locked, &locked, 0))
		return;

	__pv_queued_spin_unlock_slowpath(lock, locked);
}
#endif /* __pv_queued_spin_unlock */