// SPDX-License-Identifier: GPL-2.0
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>

/*
 * An MCS like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 *
 * Using a single mcs node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
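
/*
 * Typical caller pattern -- a rough sketch only, loosely modelled on the
 * mutex/rwsem optimistic-spin paths; owner_is_running() is an illustrative
 * placeholder, not a real helper:
 *
 *	preempt_disable();
 *	if (osq_lock(&lock->osq)) {
 *		while (owner_is_running(lock))
 *			cpu_relax();
 *		osq_unlock(&lock->osq);
 *	}
 *	preempt_enable();
 *
 * When osq_lock() returns false the caller gave up spinning and typically
 * falls back to the sleeping slow path.
 */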

struct optimistic_spin_node {
	struct optimistic_spin_node *next, *prev;
	int locked; /* 1 if lock acquired */
	int cpu; /* encoded CPU # + 1 value */
};
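
/*
 * One node per CPU; the SHARED_ALIGNED variant keeps each node on its own
 * cache line so a CPU spinning on its own node does not false-share with
 * other CPUs' nodes.
 */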
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);

/*
 * We use the value 0 to represent "no CPU", thus the encoded value
 * will be the CPU number incremented by 1.
 */
static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static inline int node_cpu(struct optimistic_spin_node *node)
{
	return node->cpu - 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	int cpu_nr = encoded_cpu_val - 1;

	return per_cpu_ptr(&osq_node, cpu_nr);
}
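
/*
 * Example round trip: encode_cpu(0) == 1, decode_cpu(1) returns CPU 0's
 * osq_node, and node_cpu() of that node yields 0 again.
 */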

/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 *
 * If osq_lock() is being cancelled there must be a previous node
 * and 'old_cpu' is its CPU #.
 * For osq_unlock() there is never a previous node and old_cpu is
 * set to OSQ_UNLOCKED_VAL.
 */
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
	      struct optimistic_spin_node *node,
	      int old_cpu)
{
	int curr = encode_cpu(smp_processor_id());

	for (;;) {
		if (atomic_read(&lock->tail) == curr &&
		    atomic_cmpxchg_acquire(&lock->tail, curr, old_cpu) == curr) {
			/*
			 * We were the last queued, we moved @lock back. @prev
			 * will now observe @lock and will complete its
			 * unlock()/unqueue().
			 */
			return NULL;
		}

		/*
		 * We must xchg() the @node->next value, because if we were to
		 * leave it in, a concurrent unlock()/unqueue() from
		 * @node->next might complete Step-A and think its @prev is
		 * still valid.
		 *
		 * If the concurrent unlock()/unqueue() wins the race, we'll
		 * wait for either @lock to point to us, through its Step-B, or
		 * wait for a new @node->next from its Step-C.
		 */
		if (node->next) {
			struct optimistic_spin_node *next;

			next = xchg(&node->next, NULL);
			if (next)
				return next;
		}

		cpu_relax();
	}
}
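
/*
 * Queue this CPU on @lock and spin until we own it. Returns true once the
 * lock is acquired; returns false if the spin is cancelled (need_resched()
 * or a preempted vCPU owner), in which case we have already unqueued
 * ourselves and the caller must not call osq_unlock().
 */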
bool osq_lock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_node *prev, *next;
	int curr = encode_cpu(smp_processor_id());
	int old;

	node->locked = 0;
	node->next = NULL;
	node->cpu = curr;

	/*
	 * We need both ACQUIRE (pairs with corresponding RELEASE in
	 * unlock() uncontended, or fastpath) and RELEASE (to publish
	 * the node fields we just initialised) semantics when updating
	 * the lock tail.
	 */
	old = atomic_xchg(&lock->tail, curr);
	if (old == OSQ_UNLOCKED_VAL)
		return true;

	prev = decode_cpu(old);
	node->prev = prev;

	/*
	 * osq_lock()			unqueue
	 *
	 * node->prev = prev		osq_wait_next()
	 * WMB				MB
	 * prev->next = node		next->prev = prev // unqueue-C
	 *
	 * Here 'node->prev' and 'next->prev' are the same variable and we need
	 * to ensure these stores happen in-order to avoid corrupting the list.
	 */
	smp_wmb();

	WRITE_ONCE(prev->next, node);

	/*
	 * Normally @prev is untouchable after the above store; because at that
	 * moment unlock can proceed and wipe the node element from stack.
	 *
	 * However, since our nodes are static per-cpu storage, we're
	 * guaranteed their existence -- this allows us to apply
	 * cmpxchg in an attempt to undo our queueing.
	 */

	/*
	 * Wait to acquire the lock or for cancellation. Note that need_resched()
	 * will come with an IPI, which will wake smp_cond_load_relaxed() if it
	 * is implemented with a monitor-wait. vcpu_is_preempted() relies on
	 * polling, be careful.
	 */
	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
				  vcpu_is_preempted(node_cpu(node->prev))))
		return true;

	/* unqueue */
	/*
	 * Step - A -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this will make @prev's
	 * unlock()/unqueue() wait for a next pointer since @lock points to us
	 * (or later).
	 */

	for (;;) {
		/*
		 * cpu_relax() below implies a compiler barrier which would
		 * prevent this comparison being optimized away.
		 */
		if (data_race(prev->next) == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;

		/*
		 * We can only fail the cmpxchg() racing against an unlock(),
		 * in which case we should observe @node->locked becoming
		 * true.
		 */
		if (smp_load_acquire(&node->locked))
			return true;

		cpu_relax();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in which
		 * case its step-C will write us a new @node->prev pointer.
		 */
		prev = READ_ONCE(node->prev);
	}

	/*
	 * Step - B -- stabilize @next
	 *
	 * Similar to unlock(), wait for @node->next or move @lock from @node
	 * back to @prev.
	 */

	next = osq_wait_next(lock, node, prev->cpu);
	if (!next)
		return false;

	/*
	 * Step - C -- unlink
	 *
	 * @prev is stable because it is still waiting for a new @prev->next
	 * pointer, @next is stable because our @node->next pointer is NULL and
	 * it will wait in Step-A.
	 */

	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);

	return false;
}
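
/*
 * Release the OSQ: either clear the tail when we are the last queued CPU,
 * or hand the lock to our successor by setting its @locked flag.
 */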
void osq_unlock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node, *next;
	int curr = encode_cpu(smp_processor_id());

	/*
	 * Fast path for the uncontended case.
	 */
	if (likely(atomic_cmpxchg_release(&lock->tail, curr,
					  OSQ_UNLOCKED_VAL) == curr))
		return;

	/*
	 * Second most likely case.
	 */
	node = this_cpu_ptr(&osq_node);
	next = xchg(&node->next, NULL);
	if (next) {
		WRITE_ONCE(next->locked, 1);
		return;
	}

	next = osq_wait_next(lock, node, OSQ_UNLOCKED_VAL);
	if (next)
		WRITE_ONCE(next->locked, 1);
}