// SPDX-License-Identifier: GPL-2.0
/*
 * arch/x86/mm/cpu_entry_area.c
 *
 * x86/cpu_entry_area: Move it to a separate unit.
 */

#include <linux/spinlock.h>
#include <linux/percpu.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
10
/* Per-CPU, page-aligned backing storage for the entry stack fixmap mapping. */
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
/*
 * Per-CPU backing storage for the IST exception stacks:
 * (N_EXCEPTION_STACKS - 1) stacks of EXCEPTION_STKSZ bytes plus one
 * DEBUG_STKSZ-sized debug stack.  Mapped into the CPU entry area by
 * setup_cpu_entry_area().
 */
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
#endif
17
18static void __init
19set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
20{
21 for ( ; pages; pages--, idx--, ptr += PAGE_SIZE)
22 __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot);
23}
24
/*
 * Setup the fixmap mappings only once per-processor: map this CPU's GDT,
 * entry stack, TSS and (on 64-bit) exception stacks and entry trampoline
 * into the CPU entry area fixmap region.
 */
static void __init setup_cpu_entry_area(int cpu)
{
#ifdef CONFIG_X86_64
	/* Entry trampoline text, defined in the entry assembly code. */
	extern char _entry_trampoline[];

	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy. If the
	 * GDT is read-only, that will triple fault. The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	/* Map the GDT and this CPU's entry stack page. */
	__set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot);
	set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, entry_stack_page),
				per_cpu_ptr(&entry_stack_storage, cpu), 1,
				PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary. Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
				&per_cpu(cpu_tss_rw, cpu),
				sizeof(struct tss_struct) / PAGE_SIZE,
				tss_prot);

#ifdef CONFIG_X86_32
	/* Cache this CPU's entry area pointer for fast lookup. */
	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
#endif

#ifdef CONFIG_X86_64
	/*
	 * The backing storage must be an exact multiple of the page size
	 * and must exactly match the size of the exception_stacks slot in
	 * struct cpu_entry_area, or the mapping below would be wrong.
	 */
	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
	BUILD_BUG_ON(sizeof(exception_stacks) !=
		     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
	set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks),
				&per_cpu(exception_stacks, cpu),
				sizeof(exception_stacks) / PAGE_SIZE,
				PAGE_KERNEL);

	/* Map the entry trampoline text read-only + executable. */
	__set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline),
		     __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
#endif
}
97
98void __init setup_cpu_entry_areas(void)
99{
100 unsigned int cpu;
101
102 for_each_possible_cpu(cpu)
103 setup_cpu_entry_area(cpu);
104}