x86: Remap GDT tables in the fixmap section
[linux-2.6-block.git] arch/x86/include/asm/processor.h

#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}
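
/*
 * Usage sketch (illustrative, not part of this header's API): the asm
 * above plants a local label "1:" and loads its address, so the
 * function returns (approximately) its own instruction pointer, e.g.
 * for ad-hoc tracing:
 *
 *	printk("executing near %p\n", current_text_addr());
 */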

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			rfu;
	char			pad0;
	char			pad1;
#else
	/* Number of 4K pages in DTLB/ITLB combined: */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	__u8			cu_id;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS + NBUGINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Logical processor id: */
	u16			logical_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
};

struct cpuid_regs {
	u32 eax, ebx, ecx, edx;
};

enum cpuid_regs_idx {
	CPUID_EAX = 0,
	CPUID_EBX,
	CPUID_ECX,
	CPUID_EDX,
};

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern u32 get_scattered_cpuid_leaf(unsigned int level,
				    unsigned int sub_leaf,
				    enum cpuid_regs_idx reg);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

#define native_cpuid_reg(reg)					\
static inline unsigned int native_cpuid_##reg(unsigned int op)	\
{								\
	unsigned int eax = op, ebx, ecx = 0, edx;		\
								\
	native_cpuid(&eax, &ebx, &ecx, &edx);			\
								\
	return reg;						\
}

/*
 * Native CPUID functions returning a single datum.
 */
native_cpuid_reg(eax)
native_cpuid_reg(ebx)
native_cpuid_reg(ecx)
native_cpuid_reg(edx)
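
/*
 * Usage sketch (illustrative): the macro above expands to helpers such
 * as native_cpuid_eax().  For example, leaf 0 reports the maximum
 * supported basic CPUID leaf in EAX:
 *
 *	unsigned int max_leaf = native_cpuid_eax(0);
 */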

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0.  We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS.  When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed));
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap.  The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

#ifdef CONFIG_X86_32
	/*
	 * Space for the temporary SYSENTER stack.
	 */
	unsigned long		SYSENTER_stack_canary;
	unsigned long		SYSENTER_stack[64];
#endif

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);

/*
 * sizeof(unsigned long) comes from the extra "long" at the end of the
 * io_bitmap.  The -1 is because the segment base+limit should point to
 * the address of the last valid byte.
 */
#define __KERNEL_TSS_LIMIT	\
	(IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1)
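
/*
 * Worked numbers (illustrative, assuming a 64-bit kernel where
 * sizeof(long) == 8): IO_BITMAP_BYTES = 65536/8 = 8192 and
 * IO_BITMAP_LONGS = 8192/8 = 1024, so the bitmap plus the trailing
 * all-ones long occupies 8200 bytes, and __KERNEL_TSS_LIMIT is
 * IO_BITMAP_OFFSET + 8192 + 8 - 1.
 */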

#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#endif

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};
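
/*
 * Layout check (illustrative): gs_base[40] places stack_canary at
 * offset 40, matching GCC's %gs:40, and 40 + sizeof(unsigned long)
 * fits within the 48 reserved bytes.  A hypothetical sanity assert:
 *
 *	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
 */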

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-line aligned:
 * "For Intel Atom processors, avoid non zero segment base address
 * that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/*
 * per-CPU IRQ handling stacks
 */
struct irq_stack {
	u32			stack[THREAD_SIZE/sizeof(u32)];
} __aligned(THREAD_SIZE);

DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif	/* X86_64 */

extern unsigned int fpu_kernel_xstate_size;
extern unsigned int fpu_user_xstate_size;

struct perf_event;

typedef struct {
	unsigned long		seg;
} mm_segment_t;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif

	u32			status;	/* thread synchronous flags */

#ifdef CONFIG_X86_64
	unsigned long		fsbase;
	unsigned long		gsbase;
#else
	/*
	 * XXX: this could presumably be unsigned short.  Alternatively,
	 * 32-bit kernels could be taught to use fsindex instead.
	 */
	unsigned long		fs;
	unsigned long		gs;
#endif

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;

	mm_segment_t		addr_limit;

	unsigned int		sig_on_uaccess_err:1;
	unsigned int		uaccess_err:1;	/* uaccess failed */

	/* Floating point and extended processor state */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
	 * the end.
	 */
};

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
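
/*
 * Illustrative note: X86_EFLAGS_IOPL is the two-bit I/O privilege
 * level field at EFLAGS bits 12-13, so passing the full mask
 * (3 << 12 == 0x3000) raises IOPL to 3:
 *
 *	native_set_iopl_mask(X86_EFLAGS_IOPL);	// IOPL = 3
 */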

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static inline unsigned long current_top_of_stack(void)
{
#ifdef CONFIG_X86_64
	return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
#else
	/* sp0 on x86_32 is special in and around vm86 mode. */
	return this_cpu_read_stable(cpu_current_top_of_stack);
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
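
/*
 * Usage sketch (illustrative): leaf 0 reports the vendor string in
 * EBX/EDX/ECX order, e.g. "GenuineIntel":
 *
 *	unsigned int max_leaf, vendor[3];
 *
 *	cpuid(0, &max_leaf, &vendor[0], &vendor[2], &vendor[1]);
 *	// vendor[] now holds the 12 vendor bytes, not NUL-terminated
 */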

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static __always_inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static __always_inline void cpu_relax(void)
{
	rep_nop();
}

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 *  a) Text was modified using one virtual address and is about to be executed
 *     from the same physical page at a different virtual address.
 *
 *  b) Text was modified on a different CPU, may subsequently be
 *     executed on this CPU, and you want to make sure the new version
 *     gets executed.  This generally means you're calling this in an IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 */
static inline void sync_core(void)
{
	/*
	 * There are quite a few ways to do this.  IRET-to-self is nice
	 * because it works on every CPU, at any CPL (so it's compatible
	 * with paravirtualization), and it never exits to a hypervisor.
	 * The only down sides are that it's a bit slow (it seems to be
	 * a bit more than 2x slower than the fastest options) and that
	 * it unmasks NMIs.  The "push %cs" is needed because, in
	 * paravirtual environments, __KERNEL_CS may not be a valid CS
	 * value when we do IRET directly.
	 *
	 * In case NMI unmasking or performance ever becomes a problem,
	 * the next best option appears to be MOV-to-CR2 and an
	 * unconditional jump.  That sequence also works on all CPUs,
	 * but it will fault at CPL3 (i.e. Xen PV and lguest).
	 *
	 * CPUID is the conventional way, but it's nasty: it doesn't
	 * exist on some 486-like CPUs, and it usually exits to a
	 * hypervisor.
	 *
	 * Like all of Linux's memory ordering operations, this is a
	 * compiler barrier as well.
	 */
	register void *__sp asm(_ASM_SP);

#ifdef CONFIG_X86_32
	asm volatile (
		"pushfl\n\t"
		"pushl %%cs\n\t"
		"pushl $1f\n\t"
		"iret\n\t"
		"1:"
		: "+r" (__sp) : : "memory");
#else
	unsigned int tmp;

	asm volatile (
		"mov %%ss, %0\n\t"
		"pushq %q0\n\t"
		"pushq %%rsp\n\t"
		"addq $8, (%%rsp)\n\t"
		"pushfq\n\t"
		"mov %%cs, %0\n\t"
		"pushq %q0\n\t"
		"pushq $1f\n\t"
		"iretq\n\t"
		"1:"
		: "=&r" (tmp), "+r" (__sp) : : "cc", "memory");
#endif
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long		boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define IA32_PAGE_OFFSET	PAGE_OFFSET
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= TOP_OF_INIT_STACK,			  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
	.addr_limit		= KERNEL_DS,				  \
}

/*
 * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})
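
/*
 * Layout sketch (illustrative, assuming the 8K x86_32 THREAD_SIZE and
 * the 8 bytes of padding described above): the returned pt_regs sits
 * just below the padding at the top of the stack page,
 *
 *	stack page ... [ pt_regs ][ 8-byte padding ] <- stack top
 *
 * so regs = (struct pt_regs *)(stack + 8192 - 8) - 1.
 */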

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size.  47 bits minus one guard page.  The guard
 * page is necessary on Intel CPUs: if a SYSCALL instruction is at
 * the highest possible canonical userspace address, then that
 * syscall will enter the kernel with a non-canonical return
 * address, and SYSRET will explode dangerously.  We avoid this
 * particular problem by preventing anything from being mapped
 * at the maximum canonical address.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)
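
/*
 * Worked value (illustrative, assuming 4K pages): 1UL << 47 is
 * 0x800000000000, so TASK_SIZE_MAX = 0x7ffffffff000, one page below
 * the top of the lower canonical half.
 */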

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  {						\
	.sp0			= TOP_OF_INIT_STACK,		\
	.addr_limit		= KERNEL_DS,			\
}

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern unsigned long thread_saved_pc(struct task_struct *tsk);

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define __TASK_UNMAPPED_BASE(task_size)	(PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE		__TASK_UNMAPPED_BASE(TASK_SIZE)
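
/*
 * Worked example (illustrative): with the 32-bit default TASK_SIZE of
 * 3GB (0xc0000000), task_size / 3 = 0x40000000, so mmap searches start
 * at the 1GB mark.
 */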

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

/* Register/unregister a process' MPX related resource */
#define MPX_ENABLE_MANAGEMENT()		mpx_enable_management()
#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()

#ifdef CONFIG_X86_INTEL_MPX
extern int mpx_enable_management(void);
extern int mpx_disable_management(void);
#else
static inline int mpx_enable_management(void)
{
	return -EINVAL;
}
static inline int mpx_disable_management(void)
{
	return -EINVAL;
}
#endif /* CONFIG_X86_INTEL_MPX */

extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}
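
/*
 * Usage sketch (illustrative): KVM advertises the 12-byte signature
 * "KVMKVMKVM\0\0\0" in the hypervisor CPUID range, so a guest can
 * probe for it with:
 *
 *	if (hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0))
 *		// running under KVM
 */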

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */