x86: Add workaround to NMI iret woes
arch/x86/kernel/cpu/common.c
#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/mtrr.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
        alloc_bootmem_cpumask_var(&cpu_initialized_mask);
        alloc_bootmem_cpumask_var(&cpu_callin_mask);
        alloc_bootmem_cpumask_var(&cpu_callout_mask);
        alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
        cpu_detect_cache_sizes(c);
#else
        /* Not much we can do here: check whether it at least has CPUID */
        if (c->cpuid_level == -1) {
                /* No CPUID. It must be an ancient CPU */
                if (c->x86 == 4)
                        strcpy(c->x86_model_id, "486");
                else if (c->x86 == 3)
                        strcpy(c->x86_model_id, "386");
        }
#endif
}

static const struct cpu_dev __cpuinitconst default_cpu = {
        .c_init         = default_init,
        .c_vendor       = "Unknown",
        .c_x86_vendor   = X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
        /*
         * We need valid kernel segments for data and code in long mode too:
         * IRET will check the segment types (kkeil 2000/10/28).
         * Also sysret mandates a special GDT layout.
         *
         * TLS descriptors are currently at a different place compared to i386.
         * Hopefully nobody expects them at a fixed place (Wine?).
         */
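        /*
         * Each GDT_ENTRY_INIT(flags, base, limit) packs a segment
         * descriptor: the low byte of 'flags' is the access byte
         * (present, DPL, type) and the high nibble holds the
         * granularity/size bits, e.g. 0xc09b = 4K-granular, 32-bit,
         * present, DPL-0 code; 0xa09b is its long-mode (L bit) twin.
         */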
        [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER32_CS]   = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
        [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
        /*
         * Segments used for calling PnP BIOS have byte granularity.
         * The code and data segments have fixed 64k limits;
         * the transfer segment sizes are set at run time.
         */
        /* 32-bit code */
        [GDT_ENTRY_PNPBIOS_CS32]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
        /* 16-bit code */
        [GDT_ENTRY_PNPBIOS_CS16]        = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_DS]          = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS1]         = GDT_ENTRY_INIT(0x0092, 0, 0),
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS2]         = GDT_ENTRY_INIT(0x0092, 0, 0),
        /*
         * The APM segments have byte granularity and their bases
         * are set at run time. All have 64k limits.
         */
        /* 32-bit code */
        [GDT_ENTRY_APMBIOS_BASE]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
        /* 16-bit code */
        [GDT_ENTRY_APMBIOS_BASE+1]      = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
        /* data */
        [GDT_ENTRY_APMBIOS_BASE+2]      = GDT_ENTRY_INIT(0x4092, 0, 0xffff),

        [GDT_ENTRY_ESPFIX_SS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        [GDT_ENTRY_PERCPU]              = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

static int __init x86_xsave_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_XSAVE);
        setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
        return 1;
}
__setup("noxsave", x86_xsave_setup);

static int __init x86_xsaveopt_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
        return 1;
}
__setup("noxsaveopt", x86_xsaveopt_setup);

#ifdef CONFIG_X86_32
static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

static int __init cachesize_setup(char *str)
{
        get_option(&str, &cachesize_override);
        return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_fxsr_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_FXSR);
        setup_clear_cpu_cap(X86_FEATURE_XMM);
        return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_SEP);
        return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
        u32 f1, f2;

        /*
         * Cyrix and IDT cpus allow disabling of CPUID
         * so the code below may return different results
         * when it is executed before and after enabling
         * the CPUID. Add "volatile" to not allow gcc to
         * optimize the subsequent calls to this function.
         */
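        /*
         * Push EFLAGS, flip the requested bit in the saved copy, load
         * it back into EFLAGS and read EFLAGS once more: f2 holds the
         * original value, f1 the value after the attempted toggle. If
         * the bit differs, the flag is changeable (e.g. a changeable
         * EFLAGS.ID means the CPUID instruction is available).
         */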
        asm volatile ("pushfl           \n\t"
                      "pushfl           \n\t"
                      "popl %0          \n\t"
                      "movl %0, %1      \n\t"
                      "xorl %2, %0      \n\t"
                      "pushl %0         \n\t"
                      "popfl            \n\t"
                      "pushfl           \n\t"
                      "popl %0          \n\t"
                      "popfl            \n\t"

                      : "=&r" (f1), "=&r" (f2)
                      : "ir" (flag));

        return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
        return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
        unsigned long lo, hi;

        if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
                return;

        /* Disable the processor serial number by setting bit 21: */
        rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
        lo |= 0x200000;
        wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

        printk(KERN_NOTICE "CPU serial number disabled.\n");
        clear_cpu_cap(c, X86_FEATURE_PN);

        /* Disabling the serial number may affect the cpuid level */
        c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
        disable_x86_serial_nr = 0;
        return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
        return 1;
}
/* Probe for the CPUID instruction */
static inline int have_cpuid_p(void)
{
        return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static int disable_smep __cpuinitdata;
static __init int setup_disable_smep(char *arg)
{
        disable_smep = 1;
        return 1;
}
__setup("nosmep", setup_disable_smep);

static __cpuinit void setup_smep(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_SMEP)) {
                if (unlikely(disable_smep)) {
                        setup_clear_cpu_cap(X86_FEATURE_SMEP);
                        clear_in_cr4(X86_CR4_SMEP);
                } else
                        set_in_cr4(X86_CR4_SMEP);
        }
}

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software. Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
        u32 feature;
        u32 level;
};

static const struct cpuid_dependent_feature __cpuinitconst
cpuid_dependent_features[] = {
        { X86_FEATURE_MWAIT,            0x00000005 },
        { X86_FEATURE_DCA,              0x00000009 },
        { X86_FEATURE_XSAVE,            0x0000000d },
        { 0, 0 }
};

static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
        const struct cpuid_dependent_feature *df;

        for (df = cpuid_dependent_features; df->feature; df++) {

                if (!cpu_has(c, df->feature))
                        continue;
                /*
                 * Note: cpuid_level is set to -1 if unavailable, but
                 * extended_cpuid_level is set to 0 if unavailable
                 * and the legitimate extended levels are all negative
                 * when signed; hence the weird messing around with
                 * signs here...
                 */
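                /*
                 * E.g. a basic level like 0x0000000d is positive as s32
                 * and is checked against cpuid_level, while an extended
                 * level like 0x80000008 is negative as s32 and is
                 * compared against extended_cpuid_level as u32.
                 */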
                if (!((s32)df->level < 0 ?
                     (u32)df->level > (u32)c->extended_cpuid_level :
                     (s32)df->level > (s32)c->cpuid_level))
                        continue;

                clear_cpu_cap(c, df->feature);
                if (!warn)
                        continue;

                printk(KERN_WARNING
                       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
                       x86_cap_flags[df->feature], df->level);
        }
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, if CPUID levels 0x80000002..4 are
 * supported, this isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
{
        const struct cpu_model_info *info;

        if (c->x86_model >= 16)
                return NULL;    /* Range check */

        if (!this_cpu)
                return NULL;

        info = this_cpu->c_models;

        while (info && info->family) {
                if (info->family == c->x86)
                        return info->model_names[c->x86_model];
                info++;
        }
        return NULL;            /* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        loadsegment(fs, __KERNEL_PERCPU);
#else
        loadsegment(gs, 0);
        wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
        load_stack_canary_segment();
}

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
        struct desc_ptr gdt_descr;

        gdt_descr.address = (long)get_cpu_gdt_table(cpu);
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);

        /* Reload the per-cpu base */
        load_percpu_segment(cpu);
}

static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;
        char *p, *q;

        if (c->extended_cpuid_level < 0x80000004)
                return;

        v = (unsigned int *)c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;

        /*
         * Intel chips right-justify this string for some dumb reason;
         * undo that brain damage:
         */
        p = q = &c->x86_model_id[0];
        while (*p == ' ')
                p++;
        if (p != q) {
                while (*p)
                        *q++ = *p++;
                while (q <= &c->x86_model_id[48])
                        *q++ = '\0';    /* Zero-pad the rest */
        }
}

void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ebx, ecx, edx, l2size;

        n = c->extended_cpuid_level;

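        /*
         * Leaf 0x80000005 reports the L1 caches: ECX[31:24] is the L1
         * data cache size in KB, EDX[31:24] the L1 instruction cache
         * size. Leaf 0x80000006 reports the L2 size in KB in ECX[31:16].
         */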
        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
#endif
        }

        if (n < 0x80000006)     /* Some chips just have a large L1. */
                return;

        cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
        l2size = ecx >> 16;

#ifdef CONFIG_X86_64
        c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
        /* do processor-specific cache resizing */
        if (this_cpu->c_size_cache)
                l2size = this_cpu->c_size_cache(c, l2size);

        /* Allow user to override all this if necessary. */
        if (cachesize_override != -1)
                l2size = cachesize_override;

        if (l2size == 0)
                return;         /* Again, no L2 cache is possible */
#endif

        c->x86_cache_size = l2size;
}

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;
        static bool printed;

        if (!cpu_has(c, X86_FEATURE_HT))
                return;

        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
                return;

        cpuid(1, &eax, &ebx, &ecx, &edx);

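        /* EBX[23:16] of leaf 1: logical processor count per physical package */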
        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
                goto out;
        }

        if (smp_num_siblings <= 1)
                goto out;

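        /*
         * The initial APIC ID is split into fields: the low bits number
         * the SMT siblings within a core, the next core_bits select the
         * core, and the remaining high bits select the physical package.
         */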
        index_msb = get_count_order(smp_num_siblings);
        c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

        smp_num_siblings = smp_num_siblings / c->x86_max_cores;

        index_msb = get_count_order(smp_num_siblings);

        core_bits = get_count_order(c->x86_max_cores);

        c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
                                       ((1 << core_bits) - 1);

out:
        if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
                printed = 1;
        }
#endif
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;
        int i;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (!cpu_devs[i])
                        break;

                if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
                    (cpu_devs[i]->c_ident[1] &&
                     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

                        this_cpu = cpu_devs[i];
                        c->x86_vendor = this_cpu->c_x86_vendor;
                        return;
                }
        }

        printk_once(KERN_ERR
                        "CPU: vendor_id '%s' unknown, using generic init.\n" \
                        "CPU: Your system may be unstable.\n", v);

        c->x86_vendor = X86_VENDOR_UNKNOWN;
        this_cpu = &default_cpu;
}

void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        c->x86 = 4;
        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 junk, tfms, cap0, misc;

                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
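                /*
                 * EAX (tfms) layout: stepping in bits [3:0], model in
                 * [7:4], family in [11:8], extended model in [19:16]
                 * and extended family in [27:20].
                 */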
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;

                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xf) << 4;

                if (cap0 & (1<<19)) {
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
                        c->x86_cache_alignment = c->x86_clflush_size;
                }
        }
}

void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;
        u32 ebx;

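        /*
         * Capability words filled in below (see asm/cpufeature.h):
         * [0] = leaf 1 EDX, [4] = leaf 1 ECX, [9] = leaf 7 EBX,
         * [1] = leaf 0x80000001 EDX, [6] = leaf 0x80000001 ECX.
         */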
        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 capability, excap;

                cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
                c->x86_capability[0] = capability;
                c->x86_capability[4] = excap;
        }

        /* Additional Intel-defined flags: level 0x00000007 */
        if (c->cpuid_level >= 0x00000007) {
                u32 eax, ebx, ecx, edx;

                cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);

                c->x86_capability[9] = ebx;
        }

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;

        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
        }

        if (c->extended_cpuid_level >= 0x80000008) {
                u32 eax = cpuid_eax(0x80000008);

                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
#ifdef CONFIG_X86_32
        else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
                c->x86_phys_bits = 36;
#endif

        if (c->extended_cpuid_level >= 0x80000007)
                c->x86_power = cpuid_edx(0x80000007);

        init_scattered_cpuid_features(c);
}

static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        int i;

        /*
         * First of all, decide if this is a 486 or higher.
         * It's a 486 if we can modify the AC flag.
         */
        if (flag_is_changeable_p(X86_EFLAGS_AC))
                c->x86 = 4;
        else
                c->x86 = 3;

        for (i = 0; i < X86_VENDOR_NUM; i++)
                if (cpu_devs[i] && cpu_devs[i]->c_identify) {
                        c->x86_vendor_id[0] = 0;
                        cpu_devs[i]->c_identify(c);
                        if (c->x86_vendor_id[0]) {
                                get_cpu_vendor(c);
                                break;
                        }
                }
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
        c->x86_phys_bits = 36;
        c->x86_virt_bits = 48;
#else
        c->x86_clflush_size = 32;
        c->x86_phys_bits = 32;
        c->x86_virt_bits = 32;
#endif
        c->x86_cache_alignment = c->x86_clflush_size;

        memset(&c->x86_capability, 0, sizeof c->x86_capability);
        c->extended_cpuid_level = 0;

        if (!have_cpuid_p())
                identify_cpu_without_cpuid(c);

        /* cyrix could have cpuid enabled via c_identify() */
        if (!have_cpuid_p())
                return;

        cpu_detect(c);

        get_cpu_vendor(c);

        get_cpu_cap(c);

        if (this_cpu->c_early_init)
                this_cpu->c_early_init(c);

#ifdef CONFIG_SMP
        c->cpu_index = 0;
#endif
        filter_cpuid_features(c, false);

        setup_smep(c);

        if (this_cpu->c_bsp_init)
                this_cpu->c_bsp_init(c);
}

void __init early_cpu_init(void)
{
        const struct cpu_dev *const *cdev;
        int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
        printk(KERN_INFO "KERNEL supported cpus:\n");
#endif

        for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
                const struct cpu_dev *cpudev = *cdev;

                if (count >= X86_VENDOR_NUM)
                        break;
                cpu_devs[count] = cpudev;
                count++;

#ifdef CONFIG_PROCESSOR_SELECT
                {
                        unsigned int j;

                        for (j = 0; j < 2; j++) {
                                if (!cpudev->c_ident[j])
                                        continue;
                                printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
                                        cpudev->c_ident[j]);
                        }
                }
#endif
        }
        early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect. In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        clear_cpu_cap(c, X86_FEATURE_NOPL);
#else
        set_cpu_cap(c, X86_FEATURE_NOPL);
#endif
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
        c->extended_cpuid_level = 0;

        if (!have_cpuid_p())
                identify_cpu_without_cpuid(c);

        /* cyrix could have cpuid enabled via c_identify() */
        if (!have_cpuid_p())
                return;

        cpu_detect(c);

        get_cpu_vendor(c);

        get_cpu_cap(c);

        if (c->cpuid_level >= 0x00000001) {
                c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
                c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
                c->apicid = c->initial_apicid;
# endif
#endif

#ifdef CONFIG_X86_HT
                c->phys_proc_id = c->initial_apicid;
#endif
        }

        setup_smep(c);

        get_model_name(c); /* Default name */

        detect_nopl(c);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
        c->x86_phys_bits = 36;
        c->x86_virt_bits = 48;
#else
        c->cpuid_level = -1;    /* CPUID not detected */
        c->x86_clflush_size = 32;
        c->x86_phys_bits = 32;
        c->x86_virt_bits = 32;
#endif
        c->x86_cache_alignment = c->x86_clflush_size;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        generic_identify(c);

        if (this_cpu->c_identify)
                this_cpu->c_identify(c);

        /* Clear/Set all flags overridden by options, after probe */
        for (i = 0; i < NCAPINTS; i++) {
                c->x86_capability[i] &= ~cpu_caps_cleared[i];
                c->x86_capability[i] |= cpu_caps_set[i];
        }

#ifdef CONFIG_X86_64
        c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

        /*
         * Vendor-specific initialization. In this section we
         * canonicalize the feature flags: if there are features a
         * certain CPU supports which CPUID doesn't tell us about,
         * CPUID claims incorrect flags, or there are other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);

        /* Disable the PN if appropriate */
        squash_the_stupid_serial_number(c);

        /*
         * The vendor-specific functions might have changed features.
         * Now we do "generic changes."
         */

        /* Filter out anything that depends on CPUID levels we don't have */
        filter_cpuid_features(c, true);

        /* If the model name is still unset, do table lookup. */
        if (!c->x86_model_id[0]) {
                const char *p;
                p = table_lookup_model(c);
                if (p)
                        strcpy(c->x86_model_id, p);
                else
                        /* Last resort... */
                        sprintf(c->x86_model_id, "%02x/%02x",
                                c->x86, c->x86_model);
        }

#ifdef CONFIG_X86_64
        detect_ht(c);
#endif

        init_hypervisor(c);
        x86_init_rdrand(c);

        /*
         * Clear/Set all flags overridden by options again; this must
         * be done before the SMP capability AND across all CPUs below.
         */
        for (i = 0; i < NCAPINTS; i++) {
                c->x86_capability[i] &= ~cpu_caps_cleared[i];
                c->x86_capability[i] |= cpu_caps_set[i];
        }

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Init Machine Check Exception if available. */
        mcheck_cpu_init(c);

        select_idle_routine(c);

#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif
}

#ifdef CONFIG_X86_64
static void vgetcpu_set_mode(void)
{
        if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
                vgetcpu_mode = VGETCPU_RDTSCP;
        else
                vgetcpu_mode = VGETCPU_LSL;
}
#endif

void __init identify_boot_cpu(void)
{
        identify_cpu(&boot_cpu_data);
        init_amd_e400_c1e_mask();
#ifdef CONFIG_X86_32
        sysenter_setup();
        enable_sep_cpu();
#else
        vgetcpu_set_mode();
#endif
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
        BUG_ON(c == &boot_cpu_data);
        identify_cpu(c);
#ifdef CONFIG_X86_32
        enable_sep_cpu();
#endif
        mtrr_ap_init();
}

struct msr_range {
        unsigned        min;
        unsigned        max;
};

static const struct msr_range msr_range_array[] __cpuinitconst = {
        { 0x00000000, 0x00000418},
        { 0xc0000000, 0xc000040b},
        { 0xc0010000, 0xc0010142},
        { 0xc0011000, 0xc001103b},
};

static void __cpuinit print_cpu_msr(void)
{
        unsigned index_min, index_max;
        unsigned index;
        u64 val;
        int i;

        for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
                index_min = msr_range_array[i].min;
                index_max = msr_range_array[i].max;

                for (index = index_min; index < index_max; index++) {
                        if (rdmsrl_amd_safe(index, &val))
                                continue;
                        printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
                }
        }
}

static int show_msr __cpuinitdata;

static __init int setup_show_msr(char *arg)
{
        int num;

        get_option(&arg, &num);

        if (num > 0)
                show_msr = num;
        return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
        setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
        return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        const char *vendor = NULL;

        if (c->x86_vendor < X86_VENDOR_NUM) {
                vendor = this_cpu->c_vendor;
        } else {
                if (c->cpuid_level >= 0)
                        vendor = c->x86_vendor_id;
        }

        if (vendor && !strstr(c->x86_model_id, vendor))
                printk(KERN_CONT "%s ", vendor);

        if (c->x86_model_id[0])
                printk(KERN_CONT "%s", c->x86_model_id);
        else
                printk(KERN_CONT "%d86", c->x86);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(KERN_CONT " stepping %02x\n", c->x86_mask);
        else
                printk(KERN_CONT "\n");

#ifdef CONFIG_SMP
        if (c->cpu_index < show_msr)
                print_cpu_msr();
#else
        if (show_msr)
                print_cpu_msr();
#endif
}

static __init int setup_disablecpuid(char *arg)
{
        int bit;

        if (get_option(&arg, &bit) && bit < NCAPINTS*32)
                setup_clear_cpu_cap(bit);
        else
                return 0;

        return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };

DEFINE_PER_CPU_FIRST(union irq_stack_union,
                     irq_stack_union) __aligned(PAGE_SIZE);

/*
 * The following four percpu variables are hot. Align current_task to
 * cacheline size such that all four fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
        &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(unsigned long, kernel_stack) =
        (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(kernel_stack);

DEFINE_PER_CPU(char *, irq_stack_ptr) =
        init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;

DEFINE_PER_CPU(unsigned int, irq_count) = -1;

/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry. Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
          [0 ... N_EXCEPTION_STACKS - 1]        = EXCEPTION_STKSZ,
          [DEBUG_STACK - 1]                     = DEBUG_STKSZ
};

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
        [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
        /*
         * LSTAR and STAR live in a somewhat strange symbiosis.
         * They both write to the same internal register. STAR allows
         * setting CS/DS, but only a 32-bit target; LSTAR sets the
         * 64-bit rip.
         */
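        /*
         * STAR[47:32] supplies CS (and CS+8 as SS) for SYSCALL;
         * STAR[63:48] is the selector base SYSRET returns through
         * (CS = base+16, SS = base+8 in 64-bit mode), hence
         * __USER32_CS below.
         */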
        wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32);
        wrmsrl(MSR_LSTAR, system_call);
        wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
        syscall32_cpu_init();
#endif

        /* Flags to clear on syscall */
        wrmsrl(MSR_SYSCALL_MASK,
               X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}

unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

#else   /* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

/* Make sure %fs and %gs are initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(struct pt_regs));
        regs->fs = __KERNEL_PERCPU;
        regs->gs = __KERNEL_STACK_CANARY;

        return regs;
}
#endif  /* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
        int i;

        for (i = 0; i < 8; i++) {
                /* Ignore db4, db5: they are aliases of db6, db7 */
                if ((i == 4) || (i == 5))
                        continue;

                set_debugreg(0, i);
        }
}

#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
        if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
                arch_kgdb_ops.correct_hw_break();
}
#else /* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif /* ! CONFIG_KGDB */

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init for 64 bit.
 */
#ifdef CONFIG_X86_64

void __cpuinit cpu_init(void)
{
        struct orig_ist *oist;
        struct task_struct *me;
        struct tss_struct *t;
        unsigned long v;
        int cpu;
        int i;

        cpu = stack_smp_processor_id();
        t = &per_cpu(init_tss, cpu);
        oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
        if (cpu != 0 && percpu_read(numa_node) == 0 &&
            early_cpu_to_node(cpu) != NUMA_NO_NODE)
                set_numa_node(early_cpu_to_node(cpu));
#endif

        me = current;

        if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
                panic("CPU#%d already initialized!\n", cpu);

        pr_debug("Initializing CPU#%d\n", cpu);

        clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

        /*
         * Initialize the per-CPU GDT with the boot GDT,
         * and set up the GDT descriptor:
         */
        switch_to_new_gdt(cpu);
        loadsegment(fs, 0);

        load_idt((const struct desc_ptr *)&idt_descr);

        memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
        syscall_init();

        wrmsrl(MSR_FS_BASE, 0);
        wrmsrl(MSR_KERNEL_GS_BASE, 0);
        barrier();

        x86_configure_nx();
        if (cpu != 0)
                enable_x2apic();

        /*
         * set up and load the per-CPU TSS
         */
        if (!oist->ist[0]) {
                char *estacks = per_cpu(exception_stacks, cpu);

                for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                        estacks += exception_stack_sizes[v];
                        oist->ist[v] = t->x86_tss.ist[v] =
                                        (unsigned long)estacks;
                }
        }

        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

        /*
         * <= is required because the CPU will access up to
         * 8 bits beyond the end of the IO permission bitmap.
         */
        for (i = 0; i <= IO_BITMAP_LONGS; i++)
                t->io_bitmap[i] = ~0UL;

        atomic_inc(&init_mm.mm_count);
        me->active_mm = &init_mm;
        BUG_ON(me->mm);
        enter_lazy_tlb(&init_mm, me);

        load_sp0(t, &current->thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
        load_LDT(&init_mm.context);

        clear_all_debug_regs();
        dbg_restore_debug_regs();

        fpu_init();
        xsave_init();

        raw_local_save_flags(kernel_eflags);

        if (is_uv_system())
                uv_cpu_init();
}

#else

void __cpuinit cpu_init(void)
{
        int cpu = smp_processor_id();
        struct task_struct *curr = current;
        struct tss_struct *t = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &curr->thread;

        if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                for (;;)
                        local_irq_enable();
        }

        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

        load_idt(&idt_descr);
        switch_to_new_gdt(cpu);

        /*
         * Set up and load the per-CPU TSS and LDT
         */
        atomic_inc(&init_mm.mm_count);
        curr->active_mm = &init_mm;
        BUG_ON(curr->mm);
        enter_lazy_tlb(&init_mm, curr);

        load_sp0(t, thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
        load_LDT(&init_mm.context);

        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

#ifdef CONFIG_DOUBLEFAULT
        /* Set up doublefault TSS pointer in the GDT */
        __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

        clear_all_debug_regs();
        dbg_restore_debug_regs();

        fpu_init();
        xsave_init();
}
#endif