#ifndef _GEN_PV_LOCK_SLOWPATH
#error "do not include this file"
#endif

#include <linux/hash.h>
#include <linux/bootmem.h>
#include <linux/debug_locks.h>

/*
 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
 * of spinning them.
 *
 * This relies on the architecture to provide two paravirt hypercalls:
 *
 *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
 *   pv_kick(cpu)             -- wakes a suspended vcpu
 *
 * Using these we implement __pv_queued_spin_lock_slowpath() and
 * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
 * native_queued_spin_unlock().
 */
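/*
 * Illustrative sketch (not part of this file) of the semantics the two
 * hypercalls are assumed to have. A native/test stub could look roughly
 * like this; example_pv_wait(), example_pv_kick(), wait_for_kick() and
 * wake_vcpu() are hypothetical names:
 *
 *	static void example_pv_wait(u8 *ptr, u8 val)
 *	{
 *		if (READ_ONCE(*ptr) != val)
 *			return;		// value already changed; don't sleep
 *		wait_for_kick();	// block this vcpu until kicked
 *	}
 *
 *	static void example_pv_kick(int cpu)
 *	{
 *		wake_vcpu(cpu);		// make a halted vcpu runnable again
 *	}
 *
 * Note that pv_wait() may return spuriously; both slow paths below therefore
 * re-check the waited-on byte and spin again if needed.
 */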
#define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)

enum vcpu_state {
	vcpu_running = 0,
	vcpu_halted,
};

struct pv_node {
	struct mcs_spinlock	mcs;
	struct mcs_spinlock	__res[3];

	int			cpu;
	u8			state;
};
/*
 * Lock and MCS node addresses hash table for fast lookup
 *
 * Hashing is done on a per-cacheline basis to minimize the need to access
 * more than one cacheline.
 *
 * Dynamically allocate a hash table big enough to hold at least 4X the
 * number of possible cpus in the system. Allocation is done on page
 * granularity. So the minimum number of hash buckets should be at least
 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
 *
 * Since we should not be holding locks from NMI context (very rare indeed) the
 * max load factor is 0.75, which is around the point where open addressing
 * breaks down.
 */
struct pv_hash_entry {
	struct qspinlock *lock;
	struct pv_node   *node;
};

#define PV_HE_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN	(PAGE_SIZE / sizeof(struct pv_hash_entry))
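/*
 * Worked example (illustrative, not from the original source): on a typical
 * 64-bit configuration with 64-byte cachelines and 4k pages,
 * sizeof(struct pv_hash_entry) == 16, so PV_HE_PER_LINE == 64/16 == 4 and
 * PV_HE_MIN == 4096/16 == 256; on 32-bit the entry is 8 bytes, giving 512.
 * These are the "256 (64-bit) or 512 (32-bit)" figures in the comment above.
 */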
static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits __read_mostly;
/*
 * Allocate memory for the PV qspinlock hash buckets
 *
 * This function should be called from the paravirt spinlock initialization
 * routine.
 */
void __init __pv_init_lock_hash(void)
{
	int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);

	if (pv_hash_size < PV_HE_MIN)
		pv_hash_size = PV_HE_MIN;

	/*
	 * Allocate space from bootmem which should be page-size aligned
	 * and hence cacheline aligned.
	 */
	pv_lock_hash = alloc_large_system_hash("PV qspinlock",
					       sizeof(struct pv_hash_entry),
					       pv_hash_size, 0, HASH_EARLY,
					       &pv_lock_hash_bits, NULL,
					       pv_hash_size, pv_hash_size);
}
#define for_each_hash_entry(he, offset, hash)						\
	for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;	\
	     offset < (1 << pv_lock_hash_bits);						\
	     offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
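/*
 * Illustrative probe order (assumed PV_HE_PER_LINE == 4): for hash == 13 the
 * start is first rounded down to the cacheline boundary, entry 12, and the
 * walk then visits entries 12, 13, 14, 15, 16, ... wrapping modulo the table
 * size, so all entries sharing a cacheline are scanned before the next line
 * is touched.
 */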
static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;

	for_each_hash_entry(he, offset, hash) {
		if (!cmpxchg(&he->lock, NULL, lock)) {
			WRITE_ONCE(he->node, node);
			return &he->lock;
		}
	}
	/*
	 * Hard assume there is a free entry for us.
	 *
	 * This is guaranteed by ensuring every blocked lock only ever consumes
	 * a single entry, and since we only have 4 nesting levels per CPU
	 * and allocated 4*nr_possible_cpus(), this must be so.
	 *
	 * The single entry is guaranteed by having the lock owner unhash
	 * before it releases.
	 */
	BUG();
}
static struct pv_node *pv_unhash(struct qspinlock *lock)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	struct pv_node *node;

	for_each_hash_entry(he, offset, hash) {
		if (READ_ONCE(he->lock) == lock) {
			node = READ_ONCE(he->node);
			WRITE_ONCE(he->lock, NULL);
			return node;
		}
	}
	/*
	 * Hard assume we'll find an entry.
	 *
	 * This guarantees a limited lookup time and is itself guaranteed by
	 * having the lock owner do the unhash -- IFF the unlock sees the
	 * SLOW flag, there MUST be a hash entry.
	 */
	BUG();
}
/*
 * Initialize the PV part of the mcs_spinlock node.
 */
static void pv_init_node(struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock));

	pn->cpu = smp_processor_id();
	pn->state = vcpu_running;
}
/*
 * Wait for node->locked to become true, halt the vcpu after a short spin.
 * pv_kick_node() is used to wake the vcpu again.
 */
static void pv_wait_node(struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	int loop;

	for (;;) {
		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (READ_ONCE(node->locked))
				return;
			cpu_relax();
		}

		/*
		 * Order pn->state vs pn->locked thusly:
		 *
		 * [S] pn->state = vcpu_halted	  [S] next->locked = 1
		 *     MB			      MB
		 * [L] pn->locked		  [RmW] pn->state = vcpu_running
		 *
		 * Matches the xchg() from pv_kick_node().
		 */
		smp_store_mb(pn->state, vcpu_halted);

		if (!READ_ONCE(node->locked))
			pv_wait(&pn->state, vcpu_halted);

		/*
		 * Reset the vCPU state to avoid unnecessary CPU kicking
		 */
		WRITE_ONCE(pn->state, vcpu_running);

		/*
		 * If the locked flag is still not set after wakeup, it is a
		 * spurious wakeup and the vCPU should wait again. However,
		 * there is a pretty high overhead for CPU halting and kicking.
		 * So it is better to spin for a while in the hope that the
		 * MCS lock will be released soon.
		 */
	}

	/*
	 * By now our node->locked should be 1 and our caller will not actually
	 * spin-wait for it. We do however rely on our caller to do a
	 * load-acquire for us.
	 */
}
/*
 * Called after setting next->locked = 1, used to wake those stuck in
 * pv_wait_node().
 */
static void pv_kick_node(struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	/*
	 * Note that because node->locked is already set, this actual
	 * mcs_spinlock entry could be re-used already.
	 *
	 * This should be fine however, kicking people for no reason is
	 * harmless.
	 *
	 * See the comment in pv_wait_node().
	 */
	if (xchg(&pn->state, vcpu_running) == vcpu_halted)
		pv_kick(pn->cpu);
}
/*
 * Wait for l->locked to become clear; halt the vcpu after a short spin.
 * __pv_queued_spin_unlock() will wake us.
 */
static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct __qspinlock *l = (void *)lock;
	struct qspinlock **lp = NULL;
	int loop;

	for (;;) {
		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (!READ_ONCE(l->locked))
				return;
			cpu_relax();
		}

		WRITE_ONCE(pn->state, vcpu_halted);
		if (!lp) { /* ONCE */
			lp = pv_hash(lock, pn);
			/*
			 * lp must be set before setting _Q_SLOW_VAL
			 *
			 * [S] lp = lock                [RmW] l = l->locked = 0
			 *     MB                             MB
			 * [S] l->locked = _Q_SLOW_VAL  [L]   lp
			 *
			 * Matches the cmpxchg() in __pv_queued_spin_unlock().
			 */
			if (!cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL)) {
				/*
				 * The lock is free and _Q_SLOW_VAL has never
				 * been set. Therefore we need to unhash before
				 * getting the lock.
				 */
				WRITE_ONCE(*lp, NULL);
				return;
			}
		}
		pv_wait(&l->locked, _Q_SLOW_VAL);

		/*
		 * The unlocker should have freed the lock before kicking the
		 * CPU. So if the lock is still not free, it is a spurious
		 * wakeup and so the vCPU should wait again after spinning for
		 * a while.
		 */
	}

	/*
	 * Lock is unlocked now; the caller will acquire it without waiting.
	 * As with pv_wait_node() we rely on the caller to do a load-acquire
	 * for us.
	 */
}
/*
 * PV version of the unlock function to be used instead of
 * queued_spin_unlock().
 */
__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;
	struct pv_node *node;
	u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);

	/*
	 * We must not unlock if SLOW, because in that case we must first
	 * unhash. Otherwise it would be possible to have multiple @lock
	 * entries, which would be BAD.
	 */
	if (likely(lockval == _Q_LOCKED_VAL))
		return;

	if (unlikely(lockval != _Q_SLOW_VAL)) {
		if (debug_locks_silent)
			return;
		WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
		return;
	}

	/*
	 * Since the above failed to release, this must be the SLOW path.
	 * Therefore start by looking up the blocked node and unhashing it.
	 */
	node = pv_unhash(lock);

	/*
	 * Now that we have a reference to the (likely) blocked pv_node,
	 * release the lock.
	 */
	smp_store_release(&l->locked, 0);

	/*
	 * At this point the memory pointed at by lock can be freed/reused,
	 * however we can still use the pv_node to kick the CPU.
	 */
	if (READ_ONCE(node->state) == vcpu_halted)
		pv_kick(node->cpu);
}
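/*
 * Illustrative hand-off timeline (a summary of the comments above, not part
 * of the original source), for a waiter W at the head of the queue and an
 * unlocking vcpu U:
 *
 *	W: spins SPIN_THRESHOLD times on l->locked, then gives up
 *	W: pv_hash(lock, pn); cmpxchg l->locked: _Q_LOCKED_VAL -> _Q_SLOW_VAL
 *	W: pv_wait(&l->locked, _Q_SLOW_VAL)	-- vcpu halts
 *	U: cmpxchg l->locked: _Q_LOCKED_VAL -> 0 fails, sees _Q_SLOW_VAL
 *	U: node = pv_unhash(lock)
 *	U: smp_store_release(&l->locked, 0)	-- lock is now free
 *	U: pv_kick(node->cpu)			-- W resumes and acquires the lock
 */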
/*
 * Include the architecture specific callee-save thunk of the
 * __pv_queued_spin_unlock(). This thunk is put together with
 * __pv_queued_spin_unlock() near the top of the file to make sure
 * that the callee-save thunk and the real unlock function are close
 * to each other sharing consecutive instruction cachelines.
 */
#include <asm/qspinlock_paravirt.h>
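/*
 * Illustrative sketch (not part of this file; hook names vary by kernel
 * version and architecture): an architecture opts in by providing
 * pv_wait()/pv_kick() and pointing its paravirt lock ops at the PV slow
 * paths from its init code, roughly:
 *
 *	__pv_init_lock_hash();
 *	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 *	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 *	pv_lock_ops.wait = arch_pv_wait;	(hypothetical arch helpers)
 *	pv_lock_ops.kick = arch_pv_kick;
 */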