/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct io_bitmap;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/cpuid.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>
#include <asm/vmxfeatures.h>
#include <asm/vdso/processor.h>
#include <asm/shstk.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

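/*
 * Illustrative sketch (not part of this header): a driver honoring
 * NET_IP_ALIGN reserves it as headroom so the IP header following the
 * 14-byte Ethernet header lands on its preferred alignment. On x86 the
 * define is 0, so both lines below are effectively no-ops:
 *
 *	skb = netdev_alloc_skb(dev, size + NET_IP_ALIGN);
 *	if (skb)
 *		skb_reserve(skb, NET_IP_ALIGN);
 */
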
#define HBP_NUM 4

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 */

struct cpuinfo_topology {
	// Real APIC ID read from the local APIC
	u32			apicid;
	// The initial APIC ID provided by CPUID
	u32			initial_apicid;

	// Physical package ID
	u32			pkg_id;

	// Physical die ID on AMD, Relative on Intel
	u32			die_id;

	// Compute unit ID - AMD specific
	u32			cu_id;

	// Core ID relative to the package
	u32			core_id;

	// Logical ID mappings
	u32			logical_pkg_id;
	u32			logical_die_id;

	// AMD Node ID and Nodes per Package info
	u32			amd_node_id;

	// Cache level topology IDs
	u32			llc_id;
	u32			l2c_id;
};

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_stepping;
#ifdef CONFIG_X86_64
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
#endif
#ifdef CONFIG_X86_VMX_FEATURE_NAMES
	__u32			vmx_capability[NVMXINTS];
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	/*
	 * Align to size of unsigned long because the x86_capability array
	 * is passed to bitops which require the alignment. Use unnamed
	 * union to enforce the array is aligned to size of unsigned long.
	 */
	union {
		__u32		x86_capability[NCAPINTS + NBUGINTS];
		unsigned long	x86_capability_alignment;
	};
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	struct cpuinfo_topology	topo;
	/* in KB - valid for CPUS which support this call: */
	unsigned int		x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values, valid only on the BSP: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_cache_mbm_width_offset;
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* protected processor identification number */
	u64			ppin;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Index into per_cpu list: */
	u16			cpu_index;
	/* Is SMT active on this core? */
	bool			smt_active;
	u32			microcode;
	/* Address space bits used by the cache internally */
	u8			x86_cache_bits;
	unsigned		initialized : 1;
} __randomize_layout;

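/*
 * Standalone illustration (not part of this header) of the unnamed-union
 * trick used for x86_capability above: the unsigned long member raises
 * the alignment of the whole union without renaming the array at its use
 * sites. Compiles as plain C11:
 *
 *	struct demo {
 *		union {
 *			unsigned int	caps[4];
 *			unsigned long	align;
 *		};
 *	};
 *	_Static_assert(_Alignof(struct demo) == _Alignof(unsigned long),
 *		       "capability words must be long-aligned for bitops");
 */
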
#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_HYGON	9
#define X86_VENDOR_ZHAOXIN	10
#define X86_VENDOR_VORTEX	11
#define X86_VENDOR_NUM		12

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern __u32			cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32			cpu_caps_set[NCAPINTS + NBUGINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

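/*
 * Usage sketch (illustrative): cpu_data() reads identically whether it
 * resolves to the per-CPU copy (SMP) or to boot_cpu_data (UP):
 *
 *	struct cpuinfo_x86 *c = &cpu_data(cpu);
 *
 *	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17)
 *		pr_info("AMD family 0x%x\n", c->x86);
 */
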
extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

static inline unsigned long long l1tf_pfn_limit(void)
{
	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}

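/*
 * Worked example (illustrative): with x86_cache_bits == 46 and
 * PAGE_SHIFT == 12, l1tf_pfn_limit() returns BIT_ULL(46 - 1 - 12) =
 * 2^33, i.e. half of the cache-addressable physical space expressed in
 * PFNs. Callers treat PFNs at or above this limit as unsafe to expose
 * in inverted PTEs as part of the L1TF mitigation.
 */
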
extern void early_cpu_init(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);

/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
	return __read_cr3() & CR3_ADDR_MASK;
}

static inline unsigned long native_read_cr3_pa(void)
{
	return __native_read_cr3() & CR3_ADDR_MASK;
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__sme_pa(pgdir));
}

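/*
 * Worked example (illustrative): with CR4.PCIDE set on x86-64, CR3
 * packs the PGD physical address in bits 51:12 and the PCID in bits
 * 11:0, and CR3_ADDR_MASK separates the two:
 *
 *	__read_cr3()  == 0x000000012345a001	// PGD at 0x12345a000, PCID 1
 *	read_cr3_pa() == 0x000000012345a000
 */
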
/*
 * Note that while the legacy 'TSS' name comes from 'Task State Segment',
 * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
 * unrelated to the task-switch mechanism:
 */
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0. We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS. When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
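/*
 * Illustrative sketch (not the literal context-switch code): the ss1
 * caching scheme described above amounts to a compare-before-wrmsr so
 * the common case avoids the expensive MSR write:
 *
 *	if (new_sysenter_cs != tss->x86_tss.ss1) {
 *		tss->x86_tss.ss1 = new_sysenter_cs;
 *		wrmsr(MSR_IA32_SYSENTER_CS, new_sysenter_cs, 0);
 *	}
 */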
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;

	/*
	 * Since Linux does not use ring 2, the 'sp2' slot is unused by
	 * hardware. entry_SYSCALL_64 uses it as scratch space to stash
	 * the user RSP value.
	 */
	u64			sp2;

	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed));
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS / BITS_PER_BYTE)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES / sizeof(long))

#define IO_BITMAP_OFFSET_VALID_MAP				\
	(offsetof(struct tss_struct, io_bitmap.bitmap) -	\
	 offsetof(struct tss_struct, x86_tss))

#define IO_BITMAP_OFFSET_VALID_ALL				\
	(offsetof(struct tss_struct, io_bitmap.mapall) -	\
	 offsetof(struct tss_struct, x86_tss))

#ifdef CONFIG_X86_IOPL_IOPERM
/*
 * The extra sizeof(unsigned long) comes from the extra "long" at the
 * end of the io bitmap. The limit is inclusive, i.e. the last valid
 * byte.
 */
# define __KERNEL_TSS_LIMIT	\
	(IO_BITMAP_OFFSET_VALID_ALL + IO_BITMAP_BYTES + \
	 sizeof(unsigned long) - 1)
#else
# define __KERNEL_TSS_LIMIT	\
	(offsetof(struct tss_struct, x86_tss) + sizeof(struct x86_hw_tss) - 1)
#endif

/* Base offset outside of TSS_LIMIT so unprivileged IO causes #GP */
#define IO_BITMAP_OFFSET_INVALID	(__KERNEL_TSS_LIMIT + 1)
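
/*
 * Worked sizes (illustrative, assuming 64-bit longs): IO_BITMAP_BYTES =
 * 65536 / 8 = 8192 and IO_BITMAP_LONGS = 8192 / 8 = 1024, so each of
 * the bitmap arrays in struct x86_io_bitmap below spans 1025 longs =
 * 8200 bytes including the terminating long. __KERNEL_TSS_LIMIT then
 * covers the hardware TSS, both bitmaps and the trailing all-ones byte,
 * while IO_BITMAP_OFFSET_INVALID points one byte past that inclusive
 * limit.
 */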

struct entry_stack {
	char	stack[PAGE_SIZE];
};

struct entry_stack_page {
	struct entry_stack stack;
} __aligned(PAGE_SIZE);

/*
 * All IO bitmap related data stored in the TSS:
 */
struct x86_io_bitmap {
	/* The sequence number of the last active bitmap. */
	u64			prev_sequence;

	/*
	 * Store the dirty size of the last io bitmap offender. The next
	 * one will have to do the cleanup as the switch out to a non io
	 * bitmap user will just set x86_tss.io_bitmap_base to a value
	 * outside of the TSS limit. So for sane tasks there is no need to
	 * actually touch the io_bitmap at all.
	 */
	unsigned int		prev_max;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * Special I/O bitmap to emulate IOPL(3). All bytes zero,
	 * except the additional byte at the end.
	 */
	unsigned long		mapall[IO_BITMAP_LONGS + 1];
};

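/*
 * Worked example (illustrative): the CPU consults one bit per I/O port,
 * so a check for port 0x3f8 (ttyS0) lands at:
 *
 *	byte = 0x3f8 / 8 = 0x7f,  bit = 0x3f8 % 8 = 0
 *
 * An OUT to that port proceeds without #GP at CPL 3 only if that bit is
 * clear in the bitmap selected by x86_tss.io_bitmap_base.
 */
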
struct tss_struct {
	/*
	 * The fixed hardware portion. This must not cross a page boundary
	 * at risk of violating the SDM's advice and potentially triggering
	 * errata.
	 */
	struct x86_hw_tss	x86_tss;

	struct x86_io_bitmap	io_bitmap;
} __aligned(PAGE_SIZE);

DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);

/* Per CPU interrupt stacks */
struct irq_stack {
	char		stack[IRQ_STACK_SIZE];
} __aligned(IRQ_STACK_SIZE);

#ifdef CONFIG_X86_64
struct fixed_percpu_data {
	/*
	 * GCC hardcodes the stack canary as %gs:40. Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 *
	 * Once we are willing to require -mstack-protector-guard-symbol=
	 * support for x86_64 stackprotector, we can get rid of this.
	 */
	char			gs_base[40];
	unsigned long		stack_canary;
};

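/*
 * Illustrative check (not in the original): the 40-byte gs_base pad is
 * precisely what places stack_canary at the %gs:40 slot GCC expects
 * (requires <stddef.h> for offsetof):
 *
 *	_Static_assert(offsetof(struct fixed_percpu_data, stack_canary) == 40,
 *		       "canary must live at %gs:40");
 */
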
DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
DECLARE_INIT_PER_CPU(fixed_percpu_data);

static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
	return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
}

extern asmlinkage void entry_SYSCALL32_ignore(void);

/* Save actual FS/GS selectors and bases to current->thread */
void current_save_fsgs(void);
#else	/* X86_64 */
#ifdef CONFIG_STACKPROTECTOR
DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
#endif
#endif	/* !X86_64 */

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
#ifdef CONFIG_X86_32
	unsigned long		sp0;
#endif
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif

#ifdef CONFIG_X86_64
	unsigned long		fsbase;
	unsigned long		gsbase;
#else
	/*
	 * XXX: this could presumably be unsigned short. Alternatively,
	 * 32-bit kernels could be taught to use fsindex instead.
	 */
	unsigned long		fs;
	unsigned long		gs;
#endif

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		virtual_dr6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	struct io_bitmap	*io_bitmap;

	/*
	 * IOPL. Privilege level dependent I/O permission which is
	 * emulated via the I/O bitmap to prevent user space from disabling
	 * interrupts.
	 */
	unsigned long		iopl_emul;

	unsigned int		iopl_warn:1;
	unsigned int		sig_on_uaccess_err:1;

	/*
	 * Protection Keys Register for Userspace. Loaded immediately on
	 * context switch. Store it in thread_struct to avoid a lookup in
	 * the task's FPU xstate buffer. This value is only valid when a
	 * task is scheduled out. For 'current' the authoritative source of
	 * PKRU is the hardware itself.
	 */
	u32			pkru;

#ifdef CONFIG_X86_USER_SHADOW_STACK
	unsigned long		features;
	unsigned long		features_locked;

	struct thread_shstk	shstk;
#endif

	/* Floating point and extended processor state */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized. It *MUST* be at
	 * the end.
	 */
};

extern void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size);

static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	fpu_thread_struct_whitelist(offset, size);
}

static inline void
native_load_sp0(unsigned long sp0)
{
	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}

static __always_inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static __always_inline unsigned long current_top_of_stack(void)
{
	/*
	 * We can't read directly from tss.sp0: sp0 on x86_32 is special in
	 * and around vm86 mode and sp0 on x86_64 is special because of the
	 * entry trampoline.
	 */
	return this_cpu_read_stable(pcpu_hot.top_of_stack);
}

static __always_inline bool on_thread_stack(void)
{
	return (unsigned long)(current_top_of_stack() -
			       current_stack_pointer) < THREAD_SIZE;
}

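/*
 * Worked example (illustrative): the single unsigned comparison above
 * also rejects pointers *above* the stack top because the subtraction
 * wraps around. Assuming THREAD_SIZE == 16384 and
 * top == 0xffffc90000010000:
 *
 *	sp = 0xffffc9000000f000: top - sp = 0x1000 < 16384  -> on stack
 *	sp = 0xffffc90000010008: top - sp wraps to ~2^64    -> off stack
 */
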
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else

static inline void load_sp0(unsigned long sp0)
{
	native_load_sp0(sp0);
}

#endif /* CONFIG_PARAVIRT_XXL */

unsigned long __get_wchan(struct task_struct *p);

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long		boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void switch_gdt_and_percpu_base(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void cpu_init(void);
extern void cpu_init_exception_handling(void);
extern void cr4_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static __always_inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

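/*
 * Usage sketch (illustrative; 'lock->owner' is a stand-in field): code
 * that is about to write shared data can pull the cache line in
 * exclusive state up front and save a shared->exclusive transition:
 *
 *	prefetchw(&lock->owner);
 *	... other work ...
 *	WRITE_ONCE(lock->owner, me);
 */
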
#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))

#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})

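/*
 * Layout sketch (illustrative): with THREAD_SIZE == 16384 and a stack
 * page at 0xffffc90000000000, task_pt_regs() yields:
 *
 *	0xffffc90000000000 + 16384 - TOP_OF_KERNEL_STACK_PADDING
 *					- sizeof(struct pt_regs)
 *
 * i.e. the user-mode register frame sits at the very top of the kernel
 * stack, just below the reserved padding.
 */
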
#ifdef CONFIG_X86_32
#define INIT_THREAD  {							\
	.sp0			= TOP_OF_INIT_STACK,			\
	.sysenter_cs		= __KERNEL_CS,				\
}

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
extern unsigned long __end_init_task[];

#define INIT_THREAD {							\
	.sp	= (unsigned long)&__end_init_task - sizeof(struct pt_regs), \
}

extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
			 unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define __TASK_UNMAPPED_BASE(task_size)	(PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE		__TASK_UNMAPPED_BASE(TASK_SIZE_LOW)

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

static inline u32 per_cpu_llc_id(unsigned int cpu)
{
	return per_cpu(cpu_info.topo.llc_id, cpu);
}

static inline u32 per_cpu_l2c_id(unsigned int cpu)
{
	return per_cpu(cpu_info.topo.l2c_id, cpu);
}

#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_nodes_per_socket(void);
extern u32 amd_get_highest_perf(void);
extern void amd_clear_divider(void);
extern void amd_check_microcode(void);
#else
static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
static inline u32 amd_get_highest_perf(void)		{ return 0; }
static inline void amd_clear_divider(void)		{ }
static inline void amd_check_microcode(void)		{ }
#endif

extern unsigned long arch_align_stack(unsigned long sp);
void free_init_pages(const char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(const char *what, void *begin, void *end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void __noreturn stop_this_cpu(void *dummy);
void microcode_check(struct cpuinfo_x86 *prev_info);
void store_cpu_caps(struct cpuinfo_x86 *info);

enum l1tf_mitigations {
	L1TF_MITIGATION_OFF,
	L1TF_MITIGATION_FLUSH_NOWARN,
	L1TF_MITIGATION_FLUSH,
	L1TF_MITIGATION_FLUSH_NOSMT,
	L1TF_MITIGATION_FULL,
	L1TF_MITIGATION_FULL_FORCE
};

extern enum l1tf_mitigations l1tf_mitigation;

enum mds_mitigations {
	MDS_MITIGATION_OFF,
	MDS_MITIGATION_FULL,
	MDS_MITIGATION_VMWERV,
};

extern bool gds_ucode_mitigated(void);

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions. WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE. The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are full serializing instructions themselves and
 * do not require this barrier. This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
	alternative("mfence; lfence", "", ALT_NOT(X86_FEATURE_APIC_MSRS_FENCE));
}

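/*
 * Usage sketch (illustrative): a caller programming the non-serializing
 * TSC deadline MSR orders its prior stores first:
 *
 *	weak_wrmsr_fence();
 *	wrmsrl(MSR_IA32_TSC_DEADLINE, deadline);
 */
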
#endif /* _ASM_X86_PROCESSOR_H */