// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
#endif

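/*
 * All per-cpu entry areas are laid out back to back in one fixed
 * virtual address range, so looking one up is pure arithmetic:
 * CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE. No memory is
 * touched, which makes this safe to call from any context.
 */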
struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

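/*
 * Install a single PTE that maps physical address @pa with protection
 * @flags at the entry-area virtual address @cea_vaddr.
 */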
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables. All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}

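/*
 * Map @pages pages of the per-cpu object at @ptr into the entry area
 * at @cea_vaddr. Per-cpu memory is not guaranteed to be physically
 * contiguous, so every page is translated individually with
 * per_cpu_ptr_to_phys().
 */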
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

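/*
 * The BTS/PEBS debug store is written by the hardware itself, so it
 * has to be reachable via the entry area, which stays mapped in both
 * the user and kernel page tables. Only the fixed-size debug_store
 * control pages are mapped eagerly here; the large buffer range just
 * gets non-present placeholder PTEs so the page tables backing it
 * exist before any buffers are allocated.
 */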
static void percpu_setup_debug_store(int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not-yet-allocated per-cpu
	 * memory, such as the debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

/* Set up the fixmap mappings only once per processor */
static void __init setup_cpu_entry_area(int cpu)
{
#ifdef CONFIG_X86_64
	extern char _entry_trampoline[];

	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy. If the
	 * GDT is read-only, that will triple fault. The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

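	/*
	 * Alias this CPU's GDT into the entry area. get_cpu_gdt_paddr()
	 * yields the physical address of the regular per-cpu GDT page,
	 * so the alias below and the normal mapping share one backing
	 * page, differing only in protections.
	 */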
	cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
		    gdt_prot);

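	/*
	 * Map the entry stack, the small stack used for the first steps
	 * of a user-to-kernel transition. It must be mapped writable in
	 * both page table sets, since entry code runs on it before any
	 * CR3 switch.
	 */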
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary. Assert that we're not doing that.
	 */
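	/*
	 * (a ^ b) & PAGE_MASK is zero only if a and b have identical
	 * page-number bits, so the first assertion below requires the
	 * hardware part of the TSS (x86_tss) to sit entirely within a
	 * single page.
	 */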
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
			     &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
#endif

#ifdef CONFIG_X86_64
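	/*
	 * Map the IST exception stacks. The two assertions check that
	 * the static per-cpu backing store and the slot reserved for it
	 * in struct cpu_entry_area have exactly the same size.
	 */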
	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
	BUILD_BUG_ON(sizeof(exception_stacks) !=
		     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
			     &per_cpu(exception_stacks, cpu),
			     sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);

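	/*
	 * Map the entry trampoline text read-only and executable. Since
	 * the entry area is visible in the user page tables, this is
	 * one of the few kernel-text mappings present while user CR3 is
	 * active, so it must never be writable.
	 */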
	cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
		    __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
#endif
	percpu_setup_debug_store(cpu);
}

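/*
 * On 32-bit, walk the entry area in PMD_SIZE steps and populate one
 * PTE in each PMD, so that the page tables backing the area exist in
 * swapper_pg_dir before the per-cpu mappings are installed and before
 * the result is synchronized to initial_page_table.
 */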
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/*
	 * Careful here: if the entry area ends in the last PMD of the
	 * address space, "start += PMD_SIZE" wraps around to a small
	 * value, so the loop must also check that start is still above
	 * CPU_ENTRY_AREA_BASE.
	 */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pg_dir which
	 * needs to be synchronized to initial_page_table on 32-bit.
	 */
	sync_initial_page_table();
}