/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head_32.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_stepping;
#ifdef CONFIG_X86_64
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	__u8			cu_id;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS + NBUGINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	unsigned int		x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Logical processor id: */
	u16			logical_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	u16			cpu_die_id;
	u16			logical_die_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
	/* Address space bits used by the cache internally */
	u8			x86_cache_bits;
	unsigned		initialized : 1;
} __randomize_layout;

struct cpuid_regs {
	u32 eax, ebx, ecx, edx;
};

enum cpuid_regs_idx {
	CPUID_EAX = 0,
	CPUID_EBX,
	CPUID_ECX,
	CPUID_EDX,
};

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_HYGON	9
#define X86_VENDOR_ZHAOXIN	10
#define X86_VENDOR_NUM		11

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct x86_hw_tss	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32			cpu_caps_set[NCAPINTS + NBUGINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

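/*
 * Usage sketch (illustrative): cpu_data() yields the cpuinfo_x86 for a
 * given CPU, whether or not this is an SMP build, e.g.:
 *
 *	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
 *
 *	pr_info("CPU model: %s\n", c->x86_model_id);
 */
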
extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

static inline unsigned long long l1tf_pfn_limit(void)
{
	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}

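/*
 * Worked example (illustrative): with x86_cache_bits == 46 and 4K
 * pages (PAGE_SHIFT == 12), the limit is BIT_ULL(46 - 1 - 12) =
 * BIT_ULL(33), i.e. the first PFN at which a page could alias into
 * the upper half of the L1D-addressable physical space.
 */
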
extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

#define native_cpuid_reg(reg)					\
static inline unsigned int native_cpuid_##reg(unsigned int op)	\
{								\
	unsigned int eax = op, ebx, ecx = 0, edx;		\
								\
	native_cpuid(&eax, &ebx, &ecx, &edx);			\
								\
	return reg;						\
}

/*
 * Native CPUID functions returning a single datum.
 */
native_cpuid_reg(eax)
native_cpuid_reg(ebx)
native_cpuid_reg(ecx)
native_cpuid_reg(edx)

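/*
 * Illustrative example: each instantiation above expands to a helper
 * returning a single register for a leaf, so
 *
 *	unsigned int max_leaf = native_cpuid_eax(0);
 *
 * yields the highest standard CPUID leaf the CPU supports.
 */
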
/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
	return __read_cr3() & CR3_ADDR_MASK;
}

static inline unsigned long native_read_cr3_pa(void)
{
	return __native_read_cr3() & CR3_ADDR_MASK;
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__sme_pa(pgdir));
}

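/*
 * Usage sketch (illustrative): because CR3_ADDR_MASK strips the PCID
 * and control bits, the active top-level page table can be located
 * with:
 *
 *	pgd_t *pgd = __va(read_cr3_pa());
 */
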
/*
 * Note that while the legacy 'TSS' name comes from 'Task State Segment',
 * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
 * unrelated to the task-switch mechanism:
 */
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0.  We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS.  When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;

	/*
	 * We store cpu_current_top_of_stack in sp1 so it's always accessible.
	 * Linux does not use ring 1, so sp1 is not otherwise needed.
	 */
	u64			sp1;

	/*
	 * Since Linux does not use ring 2, the 'sp2' slot is unused by
	 * hardware.  entry_SYSCALL_64 uses it as scratch space to stash
	 * the user RSP value.
	 */
	u64			sp2;

	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed));
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		(offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss))
#define INVALID_IO_BITMAP_OFFSET	0x8000

struct entry_stack {
	unsigned long		words[64];
};

struct entry_stack_page {
	struct entry_stack stack;
} __aligned(PAGE_SIZE);

struct tss_struct {
	/*
	 * The fixed hardware portion.  This must not cross a page boundary
	 * at risk of violating the SDM's advice and potentially triggering
	 * errata.
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
} __aligned(PAGE_SIZE);

DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);

/*
 * The extra sizeof(unsigned long) comes from the additional "long" at
 * the end of the io bitmap.
 *
 * The -1 is there because the segment base+limit must point at the
 * last valid byte.
 */
#define __KERNEL_TSS_LIMIT	\
	(IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1)

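/*
 * Worked example (illustrative, x86-64): struct x86_hw_tss packs to
 * 104 bytes, so IO_BITMAP_OFFSET is 104 and the limit works out to
 * 104 + 8192 + 8 - 1 = 8303: the offset of the last valid byte,
 * including the extra all-ones long past the bitmap.
 */
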
/* Per CPU interrupt stacks */
struct irq_stack {
	char		stack[IRQ_STACK_SIZE];
} __aligned(IRQ_STACK_SIZE);

DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);

#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#else
/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
#endif

#ifdef CONFIG_X86_64
struct fixed_percpu_data {
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	char		gs_base[40];
	unsigned long	stack_canary;
};

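/*
 * Illustrative check (an assumption, not present in the header): the
 * layout above puts the canary exactly at %gs:40, which a sanity
 * assertion could express as:
 *
 *	BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
 */
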
DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
DECLARE_INIT_PER_CPU(fixed_percpu_data);

static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
	return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
}

DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);

#if IS_ENABLED(CONFIG_KVM)
/* Save actual FS/GS selectors and bases to current->thread */
void save_fsgs_for_kvm(void);
#endif
#else	/* X86_64 */
#ifdef CONFIG_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-line aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/* Per CPU softirq stack pointer */
DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
#endif	/* X86_64 */

extern unsigned int fpu_kernel_xstate_size;
extern unsigned int fpu_user_xstate_size;

struct perf_event;

typedef struct {
	unsigned long		seg;
} mm_segment_t;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
#ifdef CONFIG_X86_32
	unsigned long		sp0;
#endif
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif

#ifdef CONFIG_X86_64
	unsigned long		fsbase;
	unsigned long		gsbase;
#else
	/*
	 * XXX: this could presumably be unsigned short.  Alternatively,
	 * 32-bit kernels could be taught to use fsindex instead.
	 */
	unsigned long fs;
	unsigned long gs;
#endif

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;

	mm_segment_t		addr_limit;

	unsigned int		sig_on_uaccess_err:1;
	unsigned int		uaccess_err:1;	/* uaccess failed */

	/* Floating point and extended processor state */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
	 * the end.
	 */
};

/* Whitelist the FPU state from the task_struct for hardened usercopy. */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = offsetof(struct thread_struct, fpu.state);
	*size = fpu_kernel_xstate_size;
}

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(unsigned long sp0)
{
	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static inline unsigned long current_top_of_stack(void)
{
	/*
	 * We can't read directly from tss.sp0: sp0 on x86_32 is special in
	 * and around vm86 mode and sp0 on x86_64 is special because of the
	 * entry trampoline.
	 */
	return this_cpu_read_stable(cpu_current_top_of_stack);
}

static inline bool on_thread_stack(void)
{
	return (unsigned long)(current_top_of_stack() -
			       current_stack_pointer) < THREAD_SIZE;
}

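/*
 * Note (illustrative): on_thread_stack() exploits unsigned wraparound.
 * When the current stack pointer is not within THREAD_SIZE bytes below
 * the top of the task stack -- e.g. while running on an IRQ stack --
 * the subtraction wraps to a huge value and the comparison fails.
 */
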
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid

static inline void load_sp0(unsigned long sp0)
{
	native_load_sp0(sp0);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT_XXL */

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

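/*
 * Usage sketch (illustrative): leaf 0 returns the 12-byte vendor
 * string in EBX:EDX:ECX order:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	char vendor[13];
 *
 *	cpuid(0, &eax, &ebx, &ecx, &edx);
 *	memcpy(vendor + 0, &ebx, 4);
 *	memcpy(vendor + 4, &edx, 4);
 *	memcpy(vendor + 8, &ecx, 4);
 *	vendor[12] = '\0';
 *
 * yielding e.g. "GenuineIntel" or "AuthenticAMD". cpuid_count() is the
 * variant for leaves whose subleaf index goes in ECX, such as leaf 4.
 */
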
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}

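/*
 * Illustrative example: cpuid_eax(0x80000000) returns the highest
 * supported extended leaf, which callers compare against 0x80000004
 * before reading the processor brand string from leaves
 * 0x80000002..0x80000004.
 */
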
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static __always_inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static __always_inline void cpu_relax(void)
{
	rep_nop();
}

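/*
 * Usage sketch (illustrative): a busy-wait loop should relax to save
 * power and to yield pipeline resources to a hyperthread sibling:
 *
 *	while (!READ_ONCE(flag))
 *		cpu_relax();
 */
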
/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 *  a) Text was modified using one virtual address and is about to be executed
 *     from the same physical page at a different virtual address.
 *
 *  b) Text was modified on a different CPU, may subsequently be
 *     executed on this CPU, and you want to make sure the new version
 *     gets executed.  This generally means you're calling this from an IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 */
static inline void sync_core(void)
{
	/*
	 * There are quite a few ways to do this.  IRET-to-self is nice
	 * because it works on every CPU, at any CPL (so it's compatible
	 * with paravirtualization), and it never exits to a hypervisor.
	 * The only down sides are that it's a bit slow (it seems to be
	 * a bit more than 2x slower than the fastest options) and that
	 * it unmasks NMIs.  The "push %cs" is needed because, in
	 * paravirtual environments, __KERNEL_CS may not be a valid CS
	 * value when we do IRET directly.
	 *
	 * In case NMI unmasking or performance ever becomes a problem,
	 * the next best option appears to be MOV-to-CR2 and an
	 * unconditional jump.  That sequence also works on all CPUs,
	 * but it will fault at CPL3 (i.e. Xen PV).
	 *
	 * CPUID is the conventional way, but it's nasty: it doesn't
	 * exist on some 486-like CPUs, and it usually exits to a
	 * hypervisor.
	 *
	 * Like all of Linux's memory ordering operations, this is a
	 * compiler barrier as well.
	 */
#ifdef CONFIG_X86_32
	asm volatile (
		"pushfl\n\t"
		"pushl %%cs\n\t"
		"pushl $1f\n\t"
		"iret\n\t"
		"1:"
		: ASM_CALL_CONSTRAINT : : "memory");
#else
	unsigned int tmp;

	asm volatile (
		UNWIND_HINT_SAVE
		"mov %%ss, %0\n\t"
		"pushq %q0\n\t"
		"pushq %%rsp\n\t"
		"addq $8, (%%rsp)\n\t"
		"pushfq\n\t"
		"mov %%cs, %0\n\t"
		"pushq %q0\n\t"
		"pushq $1f\n\t"
		"iretq\n\t"
		UNWIND_HINT_RESTORE
		"1:"
		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
#endif
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long		boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);


/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void switch_to_new_gdt(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);
extern void cr4_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

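/*
 * Usage sketch (illustrative): DEBUGCTL updates are read-modify-write
 * operations on the MSR value, e.g. enabling branch-trap flag (BTF)
 * single stepping:
 *
 *	unsigned long debugctl = get_debugctlmsr();
 *
 *	debugctl |= DEBUGCTLMSR_BTF;
 *	update_debugctlmsr(debugctl);
 */
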
extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))

#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})

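/*
 * Illustrative note: task_pt_regs() locates the user-mode register
 * frame: the top of the task's kernel stack, minus any
 * TOP_OF_KERNEL_STACK_PADDING, minus sizeof(struct pt_regs).
 * task_top_of_stack() is then the first address past that frame.
 */
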
#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define IA32_PAGE_OFFSET	PAGE_OFFSET
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_LOW		TASK_SIZE
#define TASK_SIZE_MAX		TASK_SIZE
#define DEFAULT_MAP_WINDOW	TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= TOP_OF_INIT_STACK,			  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
	.addr_limit		= KERNEL_DS,				  \
}

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size.  This is the first address outside the user range.
 * There are a few constraints that determine this:
 *
 * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
 * address, then that syscall will enter the kernel with a
 * non-canonical return address, and SYSRET will explode dangerously.
 * We avoid this particular problem by preventing anything executable
 * from being mapped at the maximum canonical address.
 *
 * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
 * CPUs malfunction if they execute code from the highest canonical page.
 * They'll speculate right off the end of the canonical space, and
 * bad things happen.  This is worked around in the same way as the
 * Intel problem.
 *
 * With page table isolation enabled, we map the LDT in ... [stay tuned]
 */
#define TASK_SIZE_MAX	((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)

#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE_LOW		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : DEFAULT_MAP_WINDOW)
#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE_LOW
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  {						\
	.addr_limit		= KERNEL_DS,			\
}

extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define __TASK_UNMAPPED_BASE(task_size)	(PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE		__TASK_UNMAPPED_BASE(TASK_SIZE_LOW)

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

/* Register/unregister a process' MPX related resource */
#define MPX_ENABLE_MANAGEMENT()		mpx_enable_management()
#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()

#ifdef CONFIG_X86_INTEL_MPX
extern int mpx_enable_management(void);
extern int mpx_disable_management(void);
#else
static inline int mpx_enable_management(void)
{
	return -EINVAL;
}
static inline int mpx_disable_management(void)
{
	return -EINVAL;
}
#endif /* CONFIG_X86_INTEL_MPX */

#ifdef CONFIG_CPU_SUP_AMD
extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);
#else
static inline u16 amd_get_nb_id(int cpu)		{ return 0; }
static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
#endif

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}

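/*
 * Usage sketch (illustrative): guests probe for a hypervisor by its
 * 12-byte CPUID signature in the 0x40000000 leaf range; KVM, for
 * example, can be detected with:
 *
 *	if (hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0))
 *		kvm_detected();		// hypothetical handler
 */
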
extern unsigned long arch_align_stack(unsigned long sp);
void free_init_pages(const char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(void *begin, void *end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
void microcode_check(void);

enum l1tf_mitigations {
	L1TF_MITIGATION_OFF,
	L1TF_MITIGATION_FLUSH_NOWARN,
	L1TF_MITIGATION_FLUSH,
	L1TF_MITIGATION_FLUSH_NOSMT,
	L1TF_MITIGATION_FULL,
	L1TF_MITIGATION_FULL_FORCE
};

extern enum l1tf_mitigations l1tf_mitigation;

enum mds_mitigations {
	MDS_MITIGATION_OFF,
	MDS_MITIGATION_FULL,
	MDS_MITIGATION_VMWERV,
};

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

#endif /* _ASM_X86_PROCESSOR_H */