// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/processor.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <asm/qspinlock.h>
#include <asm/paravirt.h>

	struct qspinlock *lock;
	u8 locked; /* 1 if lock acquired */

	struct qnode nodes[MAX_NODES];
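/*
 * Note (an assumption, mirroring the generic qspinlock design rather than
 * anything stated here): MAX_NODES qnodes are kept per CPU so that each
 * context that can nest a spinlock slowpath on one CPU -- task, soft IRQ,
 * hard IRQ, NMI -- has its own entry.
 */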
/* Tuning parameters */
static int steal_spins __read_mostly = (1 << 5);
static int remote_steal_spins __read_mostly = (1 << 2);
#if _Q_SPIN_TRY_LOCK_STEAL == 1
static const bool maybe_stealers = true;
static bool maybe_stealers __read_mostly = true;
static int head_spins __read_mostly = (1 << 8);

static bool pv_yield_owner __read_mostly = true;
static bool pv_yield_allow_steal __read_mostly = false;
static bool pv_spin_on_preempted_owner __read_mostly = false;
static bool pv_sleepy_lock __read_mostly = true;
static bool pv_sleepy_lock_sticky __read_mostly = false;
static u64 pv_sleepy_lock_interval_ns __read_mostly = 0;
static int pv_sleepy_lock_factor __read_mostly = 256;
static bool pv_yield_prev __read_mostly = true;
static bool pv_yield_propagate_owner __read_mostly = true;
static bool pv_prod_head __read_mostly = false;
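/*
 * Reading of the defaults above: 1 << 5 = 32 steal attempts before queueing,
 * 1 << 2 = 4 attempts when the lock owner is on a remote NUMA node, and
 * 1 << 8 = 256 spins at the head of the queue before the must-queue bit is
 * set. All of these can be changed at runtime through the qspl_* debugfs
 * files registered in spinlock_debugfs_init() at the bottom of this file.
 */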
static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
static DEFINE_PER_CPU_ALIGNED(u64, sleepy_lock_seen_clock);

#if _Q_SPIN_SPEC_BARRIER == 1
#define spec_barrier() do { asm volatile("ori 31,31,0" ::: "memory"); } while (0)
#define spec_barrier() do { } while (0)

static __always_inline bool recently_sleepy(void)
	/* pv_sleepy_lock is true when this is called */
	if (pv_sleepy_lock_interval_ns) {
		u64 seen = this_cpu_read(sleepy_lock_seen_clock);

			u64 delta = sched_clock() - seen;
			if (delta < pv_sleepy_lock_interval_ns)
			this_cpu_write(sleepy_lock_seen_clock, 0);
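/*
 * Worked example (illustrative interval only): with
 * pv_sleepy_lock_interval_ns = 1000000 (1ms), a CPU that recorded a
 * preempted lock owner 400000ns ago still sees delta < interval and reports
 * "recently sleepy"; once the interval has passed, the per-CPU timestamp is
 * cleared and the sleepy heuristic stops applying until a preempted owner is
 * seen again.
 */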
static __always_inline int get_steal_spins(bool paravirt, bool sleepy)
	if (paravirt && sleepy)
		return steal_spins * pv_sleepy_lock_factor;

static __always_inline int get_remote_steal_spins(bool paravirt, bool sleepy)
	if (paravirt && sleepy)
		return remote_steal_spins * pv_sleepy_lock_factor;
	return remote_steal_spins;

static __always_inline int get_head_spins(bool paravirt, bool sleepy)
	if (paravirt && sleepy)
		return head_spins * pv_sleepy_lock_factor;
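/*
 * Worked example with the defaults above: head_spins = 256 and
 * pv_sleepy_lock_factor = 256, so the queue head of a "sleepy" paravirt lock
 * spins up to 256 * 256 = 65536 times before forcing the must-queue bit,
 * versus 256 otherwise. The steal and remote-steal limits scale the same way.
 */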
static inline u32 encode_tail_cpu(int cpu)
	return (cpu + 1) << _Q_TAIL_CPU_OFFSET;

static inline int decode_tail_cpu(u32 val)
	return (val >> _Q_TAIL_CPU_OFFSET) - 1;

static inline int get_owner_cpu(u32 val)
	return (val & _Q_OWNER_CPU_MASK) >> _Q_OWNER_CPU_OFFSET;
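/*
 * Worked example of the tail encoding (the field layout itself comes from
 * asm/qspinlock.h): encode_tail_cpu(5) stores 6 in the tail field, and
 * decode_tail_cpu() of that value yields 5 again. The +1 bias keeps an
 * all-zero tail field meaning "no queued waiters".
 */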
/*
 * Try to acquire the lock if it was not already locked. If the tail matches
 * mytail then clear it, otherwise leave it unchanged. Return previous value.
 *
 * This is used by the head of the queue to acquire the lock and clean up
 * its tail if it was the last one queued.
 */
static __always_inline u32 trylock_clean_tail(struct qspinlock *lock, u32 tail)
	u32 newval = queued_spin_encode_locked_val();

"1: lwarx %0,0,%2,%7 # trylock_clean_tail \n"
	/* This test is necessary if there could be stealers */
	/* Test whether the lock tail == mytail */
	/* Merge the new locked value */
	/* If the lock tail matched, then clear it, otherwise leave it. */
"2: stwcx. %1,0,%2 \n"
"\t" PPC_ACQUIRE_BARRIER " \n"
	: "=&r" (prev), "=&r" (tmp)
	: "r" (&lock->val), "r"(tail), "r" (newval),
	  "r" (_Q_TAIL_CPU_MASK),
	  "i" (_Q_SPIN_EH_HINT)
/*
 * Publish our tail, replacing previous tail. Return previous value.
 *
 * This provides a release barrier for publishing node; it pairs with the
 * acquire barrier in get_tail_qnode() when the next CPU finds this tail
 */
static __always_inline u32 publish_tail_cpu(struct qspinlock *lock, u32 tail)
"\t" PPC_RELEASE_BARRIER " \n"
"1: lwarx %0,0,%2 # publish_tail_cpu \n"
	: "=&r" (prev), "=&r"(tmp)
	: "r" (&lock->val), "r" (tail), "r"(_Q_TAIL_CPU_MASK)
static __always_inline u32 set_mustq(struct qspinlock *lock)
"1: lwarx %0,0,%1 # set_mustq \n"
	: "r" (&lock->val), "r" (_Q_MUST_Q_VAL)

static __always_inline u32 clear_mustq(struct qspinlock *lock)
"1: lwarx %0,0,%1 # clear_mustq \n"
	: "r" (&lock->val), "r" (_Q_MUST_Q_VAL)

static __always_inline bool try_set_sleepy(struct qspinlock *lock, u32 old)
	u32 new = old | _Q_SLEEPY_VAL;

	BUG_ON(!(old & _Q_LOCKED_VAL));
	BUG_ON(old & _Q_SLEEPY_VAL);

"1: lwarx %0,0,%1 # try_set_sleepy \n"
	: "r" (&lock->val), "r"(old), "r" (new)

	return likely(prev == old);
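/*
 * try_set_sleepy() is in effect a cmpxchg: the _Q_SLEEPY_VAL bit is set, and
 * true returned, only if the lock word is still exactly 'old'.
 */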
static __always_inline void seen_sleepy_owner(struct qspinlock *lock, u32 val)
	if (pv_sleepy_lock) {
		if (pv_sleepy_lock_interval_ns)
			this_cpu_write(sleepy_lock_seen_clock, sched_clock());
		if (!(val & _Q_SLEEPY_VAL))
			try_set_sleepy(lock, val);

static __always_inline void seen_sleepy_lock(void)
	if (pv_sleepy_lock && pv_sleepy_lock_interval_ns)
		this_cpu_write(sleepy_lock_seen_clock, sched_clock());

static __always_inline void seen_sleepy_node(struct qspinlock *lock, u32 val)
	if (pv_sleepy_lock) {
		if (pv_sleepy_lock_interval_ns)
			this_cpu_write(sleepy_lock_seen_clock, sched_clock());
		if (val & _Q_LOCKED_VAL) {
			if (!(val & _Q_SLEEPY_VAL))
				try_set_sleepy(lock, val);

static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
	int cpu = decode_tail_cpu(val);
	struct qnodes *qnodesp = per_cpu_ptr(&qnodes, cpu);

	/*
	 * After publishing the new tail and finding a previous tail in the
	 * previous val (which is the control dependency), this barrier
	 * orders the release barrier in publish_tail_cpu performed by the
	 * last CPU, with subsequently looking at its qnode structures
	 */
	smp_acquire__after_ctrl_dep();

	for (idx = 0; idx < MAX_NODES; idx++) {
		struct qnode *qnode = &qnodesp->nodes[idx];
		if (qnode->lock == lock)

/* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */
static __always_inline bool __yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt, bool mustq)
	bool preempted = false;

	BUG_ON(!(val & _Q_LOCKED_VAL));

	owner = get_owner_cpu(val);
	yield_count = yield_count_of(owner);

	if ((yield_count & 1) == 0)
		goto relax; /* owner vcpu is running */

	seen_sleepy_owner(lock, val);
	/*
	 * Read the lock word after sampling the yield count. On the other side
	 * there may be a wmb because the yield count update is done by the
	 * hypervisor preemption and the value update by the OS, however this
	 * ordering might reduce the chance of out of order accesses and
	 * improve the heuristic.
	 */
	if (READ_ONCE(lock->val) == val) {
		yield_to_preempted(owner, yield_count);

		/* Don't relax if we yielded. Maybe we should? */
/* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */
static __always_inline bool yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
	return __yield_to_locked_owner(lock, val, paravirt, false);

/* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */
static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
	if ((val & _Q_MUST_Q_VAL) && pv_yield_allow_steal)

	return __yield_to_locked_owner(lock, val, paravirt, mustq);

static __always_inline void propagate_yield_cpu(struct qnode *node, u32 val, int *set_yield_cpu, bool paravirt)
	if (!pv_yield_propagate_owner)

	owner = get_owner_cpu(val);
	if (*set_yield_cpu == owner)

	next = READ_ONCE(node->next);

	if (vcpu_is_preempted(owner)) {
		next->yield_cpu = owner;
		*set_yield_cpu = owner;
	} else if (*set_yield_cpu != -1) {
		next->yield_cpu = owner;
		*set_yield_cpu = owner;

/* Called inside spin_begin() */
static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, u32 val, bool paravirt)
	int prev_cpu = decode_tail_cpu(val);
	bool preempted = false;

	if (!pv_yield_propagate_owner)

	yield_cpu = READ_ONCE(node->yield_cpu);
	if (yield_cpu == -1) {
		/* Propagate back the -1 CPU */
		if (node->next && node->next->yield_cpu != -1)
			node->next->yield_cpu = yield_cpu;

	yield_count = yield_count_of(yield_cpu);
	if ((yield_count & 1) == 0)
		goto yield_prev; /* owner vcpu is running */

	seen_sleepy_node(lock, val);

	if (yield_cpu == node->yield_cpu) {
		if (node->next && node->next->yield_cpu != yield_cpu)
			node->next->yield_cpu = yield_cpu;
		yield_to_preempted(yield_cpu, yield_count);

	yield_count = yield_count_of(prev_cpu);
	if ((yield_count & 1) == 0)
		goto relax; /* owner vcpu is running */

	seen_sleepy_node(lock, val);

	smp_rmb(); /* See __yield_to_locked_owner comment */

	yield_to_preempted(prev_cpu, yield_count);

static __always_inline bool steal_break(u32 val, int iters, bool paravirt, bool sleepy)
	if (iters >= get_steal_spins(paravirt, sleepy))

	if (IS_ENABLED(CONFIG_NUMA) &&
	    (iters >= get_remote_steal_spins(paravirt, sleepy))) {
		int cpu = get_owner_cpu(val);
		if (numa_node_id() != cpu_to_node(cpu))
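/*
 * Worked example with the default tunables: a would-be stealer normally gives
 * up and queues after steal_spins = 32 iterations, but with CONFIG_NUMA
 * enabled it gives up after only remote_steal_spins = 4 iterations when the
 * current lock owner sits on a different NUMA node.
 */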
static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool paravirt)
	bool seen_preempted = false;

	/* XXX: should spin_on_preempted_owner do anything here? */

	/* Attempt to steal the lock */
		bool preempted = false;

		val = READ_ONCE(lock->val);
		if (val & _Q_MUST_Q_VAL)

		if (unlikely(!(val & _Q_LOCKED_VAL))) {
			if (__queued_spin_trylock_steal(lock))
			preempted = yield_to_locked_owner(lock, val, paravirt);

		if (paravirt && pv_sleepy_lock) {
			if (val & _Q_SLEEPY_VAL) {
			} else if (recently_sleepy()) {

			if (pv_sleepy_lock_sticky && seen_preempted &&
			    !(val & _Q_SLEEPY_VAL)) {
				if (try_set_sleepy(lock, val))
					val |= _Q_SLEEPY_VAL;

			seen_preempted = true;
			if (!pv_spin_on_preempted_owner)
			/*
			 * With pv_spin_on_preempted_owner, don't increase iters
			 * while the owner is preempted -- we won't interfere
			 * with it by definition. This could introduce some
			 * latency issue if we continually observe preempted
			 * owners, but hopefully that's a rare corner case of
			 * a badly oversubscribed system.
			 */
	} while (!steal_break(val, iters, paravirt, sleepy));
static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt)
	struct qnodes *qnodesp;
	struct qnode *next, *node;
	bool seen_preempted = false;
	int set_yield_cpu = -1;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	qnodesp = this_cpu_ptr(&qnodes);
	if (unlikely(qnodesp->count >= MAX_NODES)) {
		while (!queued_spin_trylock(lock))

	idx = qnodesp->count++;
	/*
	 * Ensure that we increment the head node->count before initialising
	 * the actual node. If the compiler is kind enough to reorder these
	 * stores, then an IRQ could overwrite our assignments.
	 */
	node = &qnodesp->nodes[idx];
	node->cpu = smp_processor_id();
	node->yield_cpu = -1;

	tail = encode_tail_cpu(node->cpu);

	old = publish_tail_cpu(lock, tail);

	/*
	 * If there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_CPU_MASK) {
		struct qnode *prev = get_tail_qnode(lock, old);

		/* Link @node into the waitqueue. */
		WRITE_ONCE(prev->next, node);

		/* Wait for mcs node lock to be released */
		while (!node->locked) {
			if (yield_to_prev(lock, node, old, paravirt))
				seen_preempted = true;

		/* Clear out stale propagated yield_cpu */
		if (paravirt && pv_yield_propagate_owner && node->yield_cpu != -1)
			node->yield_cpu = -1;

		smp_rmb(); /* acquire barrier for the mcs lock */

		/*
		 * Generic qspinlocks have this prefetch here, but it seems
		 * like it could cause additional line transitions because
		 * the waiter will keep loading from it.
		 */
		if (_Q_SPIN_PREFETCH_NEXT) {
			next = READ_ONCE(node->next);
	/* We're at the head of the waitqueue, wait for the lock. */
		val = READ_ONCE(lock->val);
		if (!(val & _Q_LOCKED_VAL))

		if (paravirt && pv_sleepy_lock && maybe_stealers) {
			if (val & _Q_SLEEPY_VAL) {
			} else if (recently_sleepy()) {

			if (pv_sleepy_lock_sticky && seen_preempted &&
			    !(val & _Q_SLEEPY_VAL)) {
				if (try_set_sleepy(lock, val))
					val |= _Q_SLEEPY_VAL;

		propagate_yield_cpu(node, val, &set_yield_cpu, paravirt);
		preempted = yield_head_to_locked_owner(lock, val, paravirt);
			seen_preempted = true;

		if (paravirt && preempted) {
			if (!pv_spin_on_preempted_owner)

		if (!mustq && iters >= get_head_spins(paravirt, sleepy)) {
			val |= _Q_MUST_Q_VAL;

	/* If we're the last queued, must clean up the tail. */
	old = trylock_clean_tail(lock, tail);
	if (unlikely(old & _Q_LOCKED_VAL)) {
		BUG_ON(!maybe_stealers);
		goto again; /* Can only be true if maybe_stealers. */

	if ((old & _Q_TAIL_CPU_MASK) == tail)
		goto release; /* We were the tail, no next. */

	/* There is a next, must wait for node->next != NULL (MCS protocol) */
	next = READ_ONCE(node->next);
		while (!(next = READ_ONCE(node->next)))

	/*
	 * Unlock the next mcs waiter node. Release barrier is not required
	 * here because the acquirer is only accessing the lock word, and
	 * the acquire barrier we took the lock with orders that update vs
	 * this store to locked. The corresponding barrier is the smp_rmb()
	 * acquire barrier for mcs lock, above.
	 */
	if (paravirt && pv_prod_head) {
		int next_cpu = next->cpu;

		WRITE_ONCE(next->locked, 1);
			asm volatile("miso" ::: "memory");
		if (vcpu_is_preempted(next_cpu))

		WRITE_ONCE(next->locked, 1);
			asm volatile("miso" ::: "memory");

	qnodesp->count--; /* release the node */
void queued_spin_lock_slowpath(struct qspinlock *lock)
	/*
	 * This looks funny, but it induces the compiler to inline both
	 * sides of the branch rather than share code as when the condition
	 * is passed as the paravirt argument to the functions.
	 */
	if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && is_shared_processor()) {
		if (try_to_steal_lock(lock, true)) {
		queued_spin_lock_mcs_queue(lock, true);
		if (try_to_steal_lock(lock, false)) {
		queued_spin_lock_mcs_queue(lock, false);
EXPORT_SYMBOL(queued_spin_lock_slowpath);

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void pv_spinlocks_init(void)

#include <linux/debugfs.h>
static int steal_spins_set(void *data, u64 val)
#if _Q_SPIN_TRY_LOCK_STEAL == 1
	/* maybe_stealers is always true in this configuration */
	static DEFINE_MUTEX(lock);

	/*
	 * The lock slow path has a !maybe_stealers case that can assume
	 * the head of queue will not see concurrent waiters. That waiter
	 * is unsafe in the presence of stealers, so must keep them away
	 */
	if (val && !steal_spins) {
		maybe_stealers = true;
		/* wait for queue head waiter to go away */
	} else if (!val && steal_spins) {
		/* wait for all possible stealers to go away */
		maybe_stealers = false;
static int steal_spins_get(void *data, u64 *val)

DEFINE_SIMPLE_ATTRIBUTE(fops_steal_spins, steal_spins_get, steal_spins_set, "%llu\n");

static int remote_steal_spins_set(void *data, u64 val)
	remote_steal_spins = val;

static int remote_steal_spins_get(void *data, u64 *val)
	*val = remote_steal_spins;

DEFINE_SIMPLE_ATTRIBUTE(fops_remote_steal_spins, remote_steal_spins_get, remote_steal_spins_set, "%llu\n");

static int head_spins_set(void *data, u64 val)

static int head_spins_get(void *data, u64 *val)

DEFINE_SIMPLE_ATTRIBUTE(fops_head_spins, head_spins_get, head_spins_set, "%llu\n");

static int pv_yield_owner_set(void *data, u64 val)
	pv_yield_owner = !!val;

static int pv_yield_owner_get(void *data, u64 *val)
	*val = pv_yield_owner;

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_owner, pv_yield_owner_get, pv_yield_owner_set, "%llu\n");

static int pv_yield_allow_steal_set(void *data, u64 val)
	pv_yield_allow_steal = !!val;

static int pv_yield_allow_steal_get(void *data, u64 *val)
	*val = pv_yield_allow_steal;

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_allow_steal, pv_yield_allow_steal_get, pv_yield_allow_steal_set, "%llu\n");

static int pv_spin_on_preempted_owner_set(void *data, u64 val)
	pv_spin_on_preempted_owner = !!val;

static int pv_spin_on_preempted_owner_get(void *data, u64 *val)
	*val = pv_spin_on_preempted_owner;

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_spin_on_preempted_owner, pv_spin_on_preempted_owner_get, pv_spin_on_preempted_owner_set, "%llu\n");

static int pv_sleepy_lock_set(void *data, u64 val)
	pv_sleepy_lock = !!val;

static int pv_sleepy_lock_get(void *data, u64 *val)
	*val = pv_sleepy_lock;

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock, pv_sleepy_lock_get, pv_sleepy_lock_set, "%llu\n");

static int pv_sleepy_lock_sticky_set(void *data, u64 val)
	pv_sleepy_lock_sticky = !!val;

static int pv_sleepy_lock_sticky_get(void *data, u64 *val)
	*val = pv_sleepy_lock_sticky;

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock_sticky, pv_sleepy_lock_sticky_get, pv_sleepy_lock_sticky_set, "%llu\n");

static int pv_sleepy_lock_interval_ns_set(void *data, u64 val)
	pv_sleepy_lock_interval_ns = val;

static int pv_sleepy_lock_interval_ns_get(void *data, u64 *val)
	*val = pv_sleepy_lock_interval_ns;

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock_interval_ns, pv_sleepy_lock_interval_ns_get, pv_sleepy_lock_interval_ns_set, "%llu\n");

static int pv_sleepy_lock_factor_set(void *data, u64 val)
	pv_sleepy_lock_factor = val;

static int pv_sleepy_lock_factor_get(void *data, u64 *val)
	*val = pv_sleepy_lock_factor;

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock_factor, pv_sleepy_lock_factor_get, pv_sleepy_lock_factor_set, "%llu\n");

static int pv_yield_prev_set(void *data, u64 val)
	pv_yield_prev = !!val;

static int pv_yield_prev_get(void *data, u64 *val)
	*val = pv_yield_prev;

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_prev, pv_yield_prev_get, pv_yield_prev_set, "%llu\n");

static int pv_yield_propagate_owner_set(void *data, u64 val)
	pv_yield_propagate_owner = !!val;

static int pv_yield_propagate_owner_get(void *data, u64 *val)
	*val = pv_yield_propagate_owner;

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_propagate_owner, pv_yield_propagate_owner_get, pv_yield_propagate_owner_set, "%llu\n");

static int pv_prod_head_set(void *data, u64 val)
	pv_prod_head = !!val;

static int pv_prod_head_get(void *data, u64 *val)

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_prod_head, pv_prod_head_get, pv_prod_head_set, "%llu\n");
static __init int spinlock_debugfs_init(void)
	debugfs_create_file("qspl_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_steal_spins);
	debugfs_create_file("qspl_remote_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_remote_steal_spins);
	debugfs_create_file("qspl_head_spins", 0600, arch_debugfs_dir, NULL, &fops_head_spins);
	if (is_shared_processor()) {
		debugfs_create_file("qspl_pv_yield_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_owner);
		debugfs_create_file("qspl_pv_yield_allow_steal", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_allow_steal);
		debugfs_create_file("qspl_pv_spin_on_preempted_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_spin_on_preempted_owner);
		debugfs_create_file("qspl_pv_sleepy_lock", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock);
		debugfs_create_file("qspl_pv_sleepy_lock_sticky", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_sticky);
		debugfs_create_file("qspl_pv_sleepy_lock_interval_ns", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_interval_ns);
		debugfs_create_file("qspl_pv_sleepy_lock_factor", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_factor);
		debugfs_create_file("qspl_pv_yield_prev", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_prev);
		debugfs_create_file("qspl_pv_yield_propagate_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_propagate_owner);
		debugfs_create_file("qspl_pv_prod_head", 0600, arch_debugfs_dir, NULL, &fops_pv_prod_head);

device_initcall(spinlock_debugfs_init);
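/*
 * Usage sketch (hypothetical values; on powerpc, arch_debugfs_dir normally
 * corresponds to /sys/kernel/debug/powerpc):
 *
 *	# cat /sys/kernel/debug/powerpc/qspl_steal_spins
 *	32
 *	# echo 64 > /sys/kernel/debug/powerpc/qspl_steal_spins
 */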