arch/x86/include/asm/qspinlock_paravirt.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_QSPINLOCK_PARAVIRT_H
#define __ASM_QSPINLOCK_PARAVIRT_H

#include <asm/ibt.h>

void __lockfunc __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked);

/*
 * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
 * registers. For i386, however, only 1 32-bit register needs to be saved
 * and restored. So an optimized version of __pv_queued_spin_unlock() is
 * hand-coded for 64-bit, but it isn't worthwhile to do it for 32-bit.
 */
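/*
 * As a rough illustration (not the literal macro expansion, and the
 * register order is illustrative), the 64-bit thunk generated below
 * amounts to wrapping the slow path in a save/restore of the eight
 * caller-clobbered GPRs:
 *
 *	__raw_callee_save___pv_queued_spin_unlock_slowpath:
 *		push %rcx; push %rdx; push %rsi; push %rdi
 *		push %r8;  push %r9;  push %r10; push %r11
 *		call __pv_queued_spin_unlock_slowpath
 *		pop  %r11; pop  %r10; pop  %r9;  pop  %r8
 *		pop  %rdi; pop  %rsi; pop  %rdx; pop  %rcx
 *		RET
 */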
#ifdef CONFIG_64BIT

__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
#define __pv_queued_spin_unlock	__pv_queued_spin_unlock
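/*
 * The self-referential #define above tells the generic code in
 * kernel/locking/qspinlock_paravirt.h that the architecture supplies
 * its own __pv_queued_spin_unlock(), so the generic C version is not
 * compiled. A paravirtualized guest then installs the callee-save
 * thunks into the paravirt ops table; KVM guest setup, for example,
 * does roughly:
 *
 *	pv_ops.lock.queued_spin_unlock =
 *			PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 */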

/*
 * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
 * which combines the register-saving thunk and the body of the following
 * C code. Note that it puts the code in the .spinlock.text section, which
 * is equivalent to adding __lockfunc in the C code:
 *
 * void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock)
 * {
 *	u8 lockval = _Q_LOCKED_VAL;
 *
 *	if (try_cmpxchg(&lock->locked, &lockval, 0))
 *		return;
 *	__pv_queued_spin_unlock_slowpath(lock, lockval);
 * }
 *
 * For x86-64,
 *	rdi = lock              (first argument)
 *	rsi = lockval           (second argument)
 *	rdx = internal variable (set to 0)
 */
#define PV_UNLOCK_ASM							   \
	FRAME_BEGIN							   \
	"push  %rdx\n\t"		/* rdx is clobbered below */	   \
	"mov   $" __stringify(_Q_LOCKED_VAL) ",%eax\n\t" /* al = expected */ \
	"xor   %edx,%edx\n\t"		/* dl = 0, the unlocked value */   \
	LOCK_PREFIX "cmpxchg %dl,(%rdi)\n\t" /* _Q_LOCKED_VAL => 0? */	   \
	"jne   .slowpath\n\t"		/* no: al = actual lock value */   \
	"pop   %rdx\n\t"						   \
	FRAME_END							   \
	ASM_RET								   \
	".slowpath:\n\t"						   \
	"push  %rsi\n\t"		/* rsi must also be preserved */   \
	"movzbl %al,%esi\n\t"		/* arg2 = observed lock value */   \
	"call __raw_callee_save___pv_queued_spin_unlock_slowpath\n\t"	   \
	"pop   %rsi\n\t"						   \
	"pop   %rdx\n\t"						   \
	FRAME_END

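/*
 * DEFINE_ASM_FUNC() below emits PV_UNLOCK_ASM as the body of a global
 * function in the .spinlock.text section. As a rough sketch (see its
 * definition in <asm/linkage.h>; the exact expansion differs), it
 * amounts to:
 *
 *	.pushsection .spinlock.text, "ax"
 *	.global __raw_callee_save___pv_queued_spin_unlock
 *	__raw_callee_save___pv_queued_spin_unlock:
 *		<PV_UNLOCK_ASM>
 *		RET
 *	.popsection
 *
 * The appended return is why the .slowpath tail above can end at
 * FRAME_END without its own ret.
 */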
DEFINE_ASM_FUNC(__raw_callee_save___pv_queued_spin_unlock,
		PV_UNLOCK_ASM, .spinlock.text);

#else /* CONFIG_64BIT */

extern void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock);
__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock, ".spinlock.text");

#endif /* CONFIG_64BIT */
#endif