x86/cpu_entry_area: Prepare for IST guard pages
[linux-2.6-block.git] / arch/x86/mm/cpu_entry_area.c

// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
#endif

struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);
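
/*
 * For illustration: the per CPU entry areas are laid out back to back
 * starting at CPU_ENTRY_AREA_PER_CPU, so e.g.
 *
 *	cpu 0 -> CPU_ENTRY_AREA_PER_CPU + 0 * CPU_ENTRY_AREA_SIZE
 *	cpu 1 -> CPU_ENTRY_AREA_PER_CPU + 1 * CPU_ENTRY_AREA_SIZE
 *
 * The BUILD_BUG_ON() above guarantees that struct cpu_entry_area is a
 * whole number of pages.
 */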

void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables. All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}
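
/*
 * For example, percpu_setup_debug_store() below calls
 * cea_set_pte(cea, 0, PAGE_NONE) to reserve virtual space without a
 * backing page; the _PAGE_PRESENT check above keeps _PAGE_GLOBAL out
 * of such non-present PTEs.
 */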

static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per cpu
	 * memory like debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

#ifdef CONFIG_X86_64

#define cea_map_stack(name) do {					\
	npages = sizeof(estacks->name## _stack) / PAGE_SIZE;		\
	cea_map_percpu_pages(cea->estacks.name## _stack,		\
			estacks->name## _stack, npages, PAGE_KERNEL);	\
	} while (0)

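/*
 * A rough illustration, assuming the usual DF_stack/NMI_stack/... members
 * declared for both struct exception_stacks and struct cea_exception_stacks
 * in <asm/cpu_entry_area.h>: cea_map_stack(DF) expands to roughly
 *
 *	npages = sizeof(estacks->DF_stack) / PAGE_SIZE;
 *	cea_map_percpu_pages(cea->estacks.DF_stack,
 *			estacks->DF_stack, npages, PAGE_KERNEL);
 */
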
static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
	struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
	unsigned int npages;

	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
	/*
	 * The exception stack mappings in the per cpu area are protected
	 * by guard pages, so each stack must be mapped separately.
	 */
	cea_map_stack(DF);
	cea_map_stack(NMI);
	cea_map_stack(DB);
	cea_map_stack(MCE);
}
#else
static inline void percpu_setup_exception_stacks(unsigned int cpu) {}
#endif

/* Set up the fixmap mappings only once per processor */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy. If the
	 * GDT is read-only, that will triple fault. The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

	cea_map_percpu_pages(&cea->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary. Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = cea;
#endif

	percpu_setup_exception_stacks(cpu);

	percpu_setup_debug_store(cpu);
}

static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/*
	 * Careful here: start + PMD_SIZE might wrap around; the
	 * start >= CPU_ENTRY_AREA_BASE check below catches that overflow.
	 */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pg_dir which needs
	 * to be synchronized to initial_page_table on 32-bit.
	 */
	sync_initial_page_table();
}