arch/x86/mm/pti.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written by, and signed off for the Linux
 * kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 *		       Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>

#undef pr_fmt
#define pr_fmt(fmt)	"Kernel/User page tables isolation: " fmt

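/*
 * With the prefix above, e.g. the pr_info("enabled\n") in pti_init()
 * appears in dmesg as: "Kernel/User page tables isolation: enabled".
 */
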
/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PMD
#else
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PTE
#endif

static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

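/*
 * Boot-time interface, as handled below: "pti=on" force-enables PTI,
 * "pti=off" (or the older "nopti") force-disables it, and "pti=auto"
 * (the default) enables it only on CPUs affected by Meltdown.
 * cpu_mitigations_off() (e.g. "mitigations=off") also disables it.
 */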
void __init pti_check_boottime_disable(void)
{
	char arg[5];
	int ret;

	/* Assume mode is auto unless overridden. */
	pti_mode = PTI_AUTO;

	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
	if (ret > 0) {
		if (ret == 3 && !strncmp(arg, "off", 3)) {
			pti_mode = PTI_FORCE_OFF;
			pti_print_if_insecure("disabled on command line.");
			return;
		}
		if (ret == 2 && !strncmp(arg, "on", 2)) {
			pti_mode = PTI_FORCE_ON;
			pti_print_if_secure("force enabled on command line.");
			goto enable;
		}
		if (ret == 4 && !strncmp(arg, "auto", 4)) {
			pti_mode = PTI_AUTO;
			goto autosel;
		}
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti") ||
	    cpu_mitigations_off()) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on command line.");
		return;
	}

autosel:
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
enable:
	setup_force_cpu_cap(X86_FEATURE_PTI);
}

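/*
 * Hook for PGD updates: called via pti_set_user_pgtbl() (see
 * asm/pgtable.h) whenever a kernel PGD entry is written while PTI
 * is enabled.
 */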
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set. This could be an executable
	 *    EFI runtime mapping or something similar, and the kernel
	 *    may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_large(*pgd) != 0);

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_large(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_large(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down. Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables. It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_large(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}

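/*
 * The vsyscall page is a legacy userspace ABI at a fixed address, so
 * it has to remain visible in the user page-tables when PTI is on.
 */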
#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

enum pti_clone_level {
	PTI_CLONE_PMD,
	PTI_CLONE_PTE,
};

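/*
 * PTI_CLONE_PMD shares whole last-level page tables (PMD granularity)
 * between the kernel and user copies; PTI_CLONE_PTE copies individual
 * PTEs instead. See pti_clone_pgtable() below.
 */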
static void
pti_clone_pgtable(unsigned long start, unsigned long end,
		  enum pti_clone_level level)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end;) {
		pte_t *pte, *target_pte;
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			addr += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr += PMD_SIZE;
			continue;
		}

		if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
			target_pmd = pti_user_pagetable_walk_pmd(addr);
			if (WARN_ON(!target_pmd))
				return;

			/*
			 * Only clone present PMDs. This ensures only setting
			 * _PAGE_GLOBAL on present PMDs. This should only be
			 * called on well-known addresses anyway, so a non-
			 * present PMD would be a surprise.
			 */
			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
				return;

			/*
			 * Setting 'target_pmd' below creates a mapping in both
			 * the user and kernel page tables. It is effectively
			 * global, so set it as global in both copies. Note:
			 * the X86_FEATURE_PGE check is not _required_ because
			 * the CPU ignores _PAGE_GLOBAL when PGE is not
			 * supported. The check keeps consistency with
			 * code that only sets this bit when supported.
			 */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

			/*
			 * Copy the PMD. That is, the kernelmode and usermode
			 * tables will share the last-level page tables of this
			 * address range.
			 */
			*target_pmd = *pmd;

			addr += PMD_SIZE;

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
				addr += PAGE_SIZE;
				continue;
			}

			/* Only clone present PTEs */
			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
				return;

			/* Allocate PTE in the user page-table */
			target_pte = pti_user_pagetable_walk_pte(addr);
			if (WARN_ON(!target_pte))
				return;

			/* Set GLOBAL bit in both PTEs */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);

			/* Clone the PTE */
			*target_pte = *pte;

			addr += PAGE_SIZE;

		} else {
			BUG();
		}
	}
}

#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA and associated data into the user space visible
 * page table.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned int cpu;

	pti_clone_p4d(CPU_ENTRY_AREA_BASE);

	for_each_possible_cpu(cpu) {
		/*
		 * The SYSCALL64 entry code needs to be able to find the
		 * thread stack and needs one word of scratch space in which
		 * to spill a register.  All of this lives in the TSS, in
		 * the sp1 and sp2 slots.
		 *
		 * This is done for all possible CPUs during boot to ensure
		 * that it's propagated to all mms.  If we were to add one of
		 * these mappings during CPU hotplug, we would need to take
		 * some measure to make sure that every mm that subsequently
		 * ran on that CPU would have the relevant PGD entry in its
		 * pagetables.  The usual vmalloc_fault() mechanism would not
		 * work for page faults taken in entry_SYSCALL_64 before RSP
		 * is set up.
		 */

		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
		pte_t *target_pte;

		target_pte = pti_user_pagetable_walk_pte(va);
		if (WARN_ON(!target_pte))
			return;

		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
	}
}

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of Kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry and irqentry text and force it RO.
 */
static void pti_clone_entry_text(void)
{
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __irqentry_text_end,
			  PTI_CLONE_PMD);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto.  Do the most
	 * secure thing (not global) if pti=on is specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages.  Do the safe thing (disable
	 * global kernel image).  This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures.  Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
		return false;

	return true;
}

/*
 * This is the only user for these and it is not arch-generic
 * like the other set_memory.h functions.  Just extern them.
 */
extern int set_memory_nonglobal(unsigned long addr, int numpages);
extern int set_memory_global(unsigned long addr, int numpages);

/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web.  But, do not
	 * clone the areas past rodata, they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)__stop___ex_table);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);

	/*
	 * pti_clone_pgtable() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}

static void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel.  We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We check for X86_FEATURE_PCID here. But the init-code will
	 * clear the feature flag on 32 bit because the feature is not
	 * supported on 32 bit anyway. To print the warning we need to
	 * check with cpuid directly again.
	 */
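	/* CPUID leaf 0x1, ECX bit 17 is the PCID capability bit. */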
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();
	/* Replace some of the global bits just for shared entry text: */
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX. These changes need to be cloned again to the
 * userspace page-table. (This runs once from kernel_init(), after the
 * kernel image has been marked read-only.)
 */
void pti_finalize(void)
{
	/*
	 * We need to clone everything (again) that maps parts of the
	 * kernel image.
	 */
	pti_clone_entry_text();
	pti_clone_kernel_text();

	debug_checkwx_user();
}