/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

#include <asm/nospec-branch.h>

/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
extern __always_inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        /*
         * "=rm" is safe here, because "pop" adjusts the stack before
         * it evaluates its effective address -- this is part of the
         * documented behavior of the "pop" instruction.
         */
        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
                     : "=rm" (flags)
                     : /* no input */
                     : "memory");

        return flags;
}
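
/*
 * Usage sketch (illustrative only, not part of the header itself): the value
 * returned by native_save_fl() is the raw flags word, so callers can test the
 * interrupt-enable bit against X86_EFLAGS_IF directly.  do_work() below is a
 * made-up placeholder.
 *
 *      unsigned long flags = native_save_fl();
 *
 *      if (flags & X86_EFLAGS_IF)
 *              do_work();              (interrupts were enabled when sampled)
 */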

static __always_inline void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}

static __always_inline void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}

static __always_inline void native_safe_halt(void)
{
        mds_idle_clear_cpu_buffers();
        asm volatile("sti; hlt": : :"memory");
}

static __always_inline void native_halt(void)
{
        mds_idle_clear_cpu_buffers();
        asm volatile("hlt": : :"memory");
}

static __always_inline int native_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}

static __always_inline unsigned long native_local_irq_save(void)
{
        unsigned long flags = native_save_fl();

        native_irq_disable();

        return flags;
}

static __always_inline void native_local_irq_restore(unsigned long flags)
{
        if (!native_irqs_disabled_flags(flags))
                native_irq_enable();
}
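
/*
 * Usage sketch (illustrative only): the two helpers above are meant to be
 * used as a pair, mirroring arch_local_irq_save()/arch_local_irq_restore()
 * further down.  update_percpu_state() is a made-up placeholder for work
 * that must not race with interrupt handlers on this CPU.
 *
 *      unsigned long flags;
 *
 *      flags = native_local_irq_save();
 *      update_percpu_state();
 *      native_local_irq_restore(flags);
 */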

#endif

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static __always_inline unsigned long arch_local_save_flags(void)
{
        return native_save_fl();
}

static __always_inline void arch_local_irq_disable(void)
{
        native_irq_disable();
}

static __always_inline void arch_local_irq_enable(void)
{
        native_irq_enable();
}

/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static __always_inline void arch_safe_halt(void)
{
        native_safe_halt();
}
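
/*
 * Usage sketch (illustrative only): the classic idle pattern checks for work
 * with interrupts disabled and only then calls arch_safe_halt().  Because
 * "sti" does not take effect until after the following instruction, a wakeup
 * interrupt cannot slip in between the check and the "hlt".
 * cpu_has_pending_work() is a made-up placeholder.
 *
 *      arch_local_irq_disable();
 *      if (cpu_has_pending_work())
 *              arch_local_irq_enable();
 *      else
 *              arch_safe_halt();
 */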

/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
static __always_inline void halt(void)
{
        native_halt();
}
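
/*
 * Usage sketch (illustrative only): halt() leaves the interrupt flag alone,
 * so with interrupts already disabled it can park a CPU for good, e.g. on a
 * shutdown path:
 *
 *      arch_local_irq_disable();
 *      for (;;)
 *              halt();
 */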

/*
 * For spinlocks, etc:
 */
static __always_inline unsigned long arch_local_irq_save(void)
{
        unsigned long flags = arch_local_save_flags();
        arch_local_irq_disable();
        return flags;
}
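
/*
 * Usage sketch (illustrative only): the usual shape of a short critical
 * section built on the helper above; the generic local_irq_save() and
 * local_irq_restore() macros ultimately map onto these arch_* helpers.
 * touch_protected_data() is a made-up placeholder.
 *
 *      unsigned long flags;
 *
 *      flags = arch_local_irq_save();
 *      touch_protected_data();
 *      arch_local_irq_restore(flags);
 */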
#else /* __ASSEMBLY__ */

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
/* Assembly-only helper: saves RFLAGS into %rax (and clobbers it). */
#define SAVE_FLAGS              pushfq; popq %rax
#endif

#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_XXL */

#ifndef __ASSEMBLY__
static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}

static __always_inline int arch_irqs_disabled(void)
{
        unsigned long flags = arch_local_save_flags();

        return arch_irqs_disabled_flags(flags);
}
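
/*
 * Usage sketch (illustrative only): arch_irqs_disabled() samples the live
 * flags, so it can back a cheap sanity check in code that has to run with
 * interrupts off, e.g.:
 *
 *      WARN_ON_ONCE(!arch_irqs_disabled());
 */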

static __always_inline void arch_local_irq_restore(unsigned long flags)
{
        if (!arch_irqs_disabled_flags(flags))
                arch_local_irq_enable();
}
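
/*
 * Usage sketch (illustrative only): the restore path only re-enables
 * interrupts when the saved flags show they were enabled, so save/restore
 * sections nest naturally:
 *
 *      unsigned long a = arch_local_irq_save();        (IRQs now off)
 *      unsigned long b = arch_local_irq_save();        (still off)
 *
 *      arch_local_irq_restore(b);      (stays off: b was saved with IF clear)
 *      arch_local_irq_restore(a);      (back to whatever the outer save saw)
 */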
#endif /* !__ASSEMBLY__ */

#endif