/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_QSPINLOCK_PARAVIRT_H
#define __ASM_QSPINLOCK_PARAVIRT_H

#include <asm/ibt.h>

/*
 * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
 * registers. For i386, however, only 1 32-bit register needs to be saved
 * and restored. So an optimized version of __pv_queued_spin_unlock() is
 * hand-coded for 64-bit, but it isn't worthwhile to do it for 32-bit.
 */
#ifdef CONFIG_64BIT

__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
#define __pv_queued_spin_unlock	__pv_queued_spin_unlock
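
/*
 * For orientation (an approximation, not the authoritative definition in
 * asm/paravirt.h): the callee-save thunk that __PV_CALLEE_SAVE_REGS_THUNK()
 * emits on x86-64 preserves the eight caller-clobbered registers other than
 * the return register around the real call, roughly:
 *
 *	__raw_callee_save___pv_queued_spin_unlock_slowpath:
 *		push %rcx; push %rdx; push %rsi; push %rdi;
 *		push %r8;  push %r9;  push %r10; push %r11;
 *		call __pv_queued_spin_unlock_slowpath;
 *		pop  %r11; pop  %r10; pop  %r9;  pop  %r8;
 *		pop  %rdi; pop  %rsi; pop  %rdx; pop  %rcx;
 *		ret;
 */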

/*
 * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
 * which combines the register-saving thunk and the body of the following
 * C code.  Note that it puts the code in the .spinlock.text section which
 * is equivalent to adding __lockfunc in the C code:
 *
 * void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock)
 * {
 *	u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
 *
 *	if (likely(lockval == _Q_LOCKED_VAL))
 *		return;
 *	pv_queued_spin_unlock_slowpath(lock, lockval);
 * }
 *
 * For x86-64,
 *	rdi = lock              (first argument)
 *	rsi = lockval           (second argument)
 *	rdx = internal variable (set to 0)
 */
#define PV_UNLOCK_ASM							\
	FRAME_BEGIN							\
	"push  %rdx\n\t"						\
	"mov   $0x1,%eax\n\t"						\
	"xor   %edx,%edx\n\t"						\
	LOCK_PREFIX "cmpxchg %dl,(%rdi)\n\t"				\
	"cmp   $0x1,%al\n\t"						\
	"jne   .slowpath\n\t"						\
	"pop   %rdx\n\t"						\
	FRAME_END							\
	ASM_RET								\
	".slowpath:\n\t"						\
	"push  %rsi\n\t"						\
	"movzbl %al,%esi\n\t"						\
	"call __raw_callee_save___pv_queued_spin_unlock_slowpath\n\t"	\
	"pop   %rsi\n\t"						\
	"pop   %rdx\n\t"						\
	FRAME_END

DEFINE_PARAVIRT_ASM(__raw_callee_save___pv_queued_spin_unlock,
		    PV_UNLOCK_ASM, .spinlock.text);
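
/*
 * Illustrative only: a plain-C analogue of the fast path PV_UNLOCK_ASM
 * implements, written with the GCC __atomic builtins so the control flow
 * can be read without the register details.  The names pv_unlock_sketch()
 * and slowpath() are made up for this sketch, and the kernel's LOCK CMPXCHG
 * is a full barrier rather than the release ordering used here:
 *
 *	static void pv_unlock_sketch(unsigned char *locked)
 *	{
 *		unsigned char expected = 1;		// _Q_LOCKED_VAL
 *
 *		// Fast path: clear the lock byte iff it still holds plain
 *		// _Q_LOCKED_VAL (matches LOCK CMPXCHG %dl,(%rdi) above).
 *		if (__atomic_compare_exchange_n(locked, &expected, 0, false,
 *						__ATOMIC_RELEASE,
 *						__ATOMIC_RELAXED))
 *			return;
 *
 *		// A waiter changed the byte (e.g. to _Q_SLOW_VAL), so hand
 *		// the observed value to the slow path, as the asm does by
 *		// loading it into %rsi before the call.
 *		slowpath(locked, expected);
 *	}
 */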

#else /* CONFIG_64BIT */

extern void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock);
__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock, ".spinlock.text");

#endif /* CONFIG_64BIT */
#endif