Merge tag 'pm-6.16-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
[linux-2.6-block.git] / arch / x86 / include / asm / irqflags.h
... / ...
CommitLineData
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _X86_IRQFLAGS_H_
3#define _X86_IRQFLAGS_H_
4
5#include <asm/processor-flags.h>
6
7#ifndef __ASSEMBLER__
8
9#include <asm/nospec-branch.h>
10
11/*
12 * Interrupt control:
13 */
14
15/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
/*
 * Read the raw EFLAGS register via PUSHF/POP.
 *
 * Returns the full EFLAGS word; callers typically test X86_EFLAGS_IF in
 * the result (see native_irqs_disabled_flags()).
 *
 * NOTE: declared "extern inline" (kernel gnu_inline semantics) rather
 * than "static", pairing with the prototype above for old-gcc warning
 * compatibility.
 */
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
34
/*
 * Disable interrupt delivery on this CPU: CLI clears EFLAGS.IF.
 * The "memory" clobber is a compiler barrier so memory accesses are not
 * reordered across the interrupt-state change.
 */
static __always_inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}
39
/*
 * Enable interrupt delivery on this CPU: STI sets EFLAGS.IF.
 * The "memory" clobber is a compiler barrier so memory accesses are not
 * reordered across the interrupt-state change.
 */
static __always_inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
44
/*
 * Enable interrupts and halt atomically.  STI and HLT must be adjacent:
 * STI's one-instruction interrupt shadow guarantees no interrupt can be
 * taken between enabling IF and halting, so a wakeup cannot be lost.
 * CPU buffers are cleared first as an idle-time MDS mitigation (helper
 * from asm/nospec-branch.h).
 */
static __always_inline void native_safe_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("sti; hlt": : :"memory");
}
50
/*
 * Halt the CPU without touching the interrupt flag: either interrupts
 * are already enabled, or the caller intends a dead stop.  CPU buffers
 * are cleared first as an idle-time MDS mitigation.
 */
static __always_inline void native_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt": : :"memory");
}
56
57static __always_inline int native_irqs_disabled_flags(unsigned long flags)
58{
59 return !(flags & X86_EFLAGS_IF);
60}
61
62static __always_inline unsigned long native_local_irq_save(void)
63{
64 unsigned long flags = native_save_fl();
65
66 native_irq_disable();
67
68 return flags;
69}
70
71static __always_inline void native_local_irq_restore(unsigned long flags)
72{
73 if (!native_irqs_disabled_flags(flags))
74 native_irq_enable();
75}
76
77#endif
78
79#ifndef CONFIG_PARAVIRT
80#ifndef __ASSEMBLY__
81/*
82 * Used in the idle loop; sti takes one instruction cycle
83 * to complete:
84 */
85static __always_inline void arch_safe_halt(void)
86{
87 native_safe_halt();
88}
89
90/*
91 * Used when interrupts are already enabled or to
92 * shutdown the processor:
93 */
94static __always_inline void halt(void)
95{
96 native_halt();
97}
98#endif /* __ASSEMBLY__ */
99#endif /* CONFIG_PARAVIRT */
100
101#ifdef CONFIG_PARAVIRT_XXL
102#include <asm/paravirt.h>
103#else
104#ifndef __ASSEMBLER__
105#include <linux/types.h>
106
/*
 * Non-paravirt build: arch_local_save_flags() reads EFLAGS directly via
 * native_save_fl().  Returns the raw EFLAGS word.
 */
static __always_inline unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}
111
/* Non-paravirt build: disable local interrupts (CLI). */
static __always_inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}
116
/* Non-paravirt build: enable local interrupts (STI). */
static __always_inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}
121
122/*
123 * For spinlocks, etc:
124 */
125static __always_inline unsigned long arch_local_irq_save(void)
126{
127 unsigned long flags = arch_local_save_flags();
128 arch_local_irq_disable();
129 return flags;
130}
131#else
132
#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
/* Assembly-side EFLAGS save for entry debugging; clobbers %rax. */
#define SAVE_FLAGS pushfq; popq %rax
#endif

#endif
139
140#endif /* __ASSEMBLER__ */
141#endif /* CONFIG_PARAVIRT_XXL */
142
143#ifndef __ASSEMBLER__
144static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
145{
146 return !(flags & X86_EFLAGS_IF);
147}
148
149static __always_inline int arch_irqs_disabled(void)
150{
151 unsigned long flags = arch_local_save_flags();
152
153 return arch_irqs_disabled_flags(flags);
154}
155
156static __always_inline void arch_local_irq_restore(unsigned long flags)
157{
158 if (!arch_irqs_disabled_flags(flags))
159 arch_local_irq_enable();
160}
161#endif /* !__ASSEMBLER__ */
162
163#endif