// SPDX-License-Identifier: GPL-2.0
/*
 * This file implements KASLR memory randomization for x86_64. It randomizes
 * the virtual address space of kernel memory regions (physical memory
 * mapping, vmalloc & vmemmap). This security feature mitigates
 * exploits relying on predictable kernel addresses.
 *
 * Entropy is generated using the KASLR early boot functions now shared in
 * the lib directory (originally written by Kees Cook). Randomization is
 * done on the PGD & P4D/PUD page table levels to increase the number of
 * possible addresses. The physical memory mapping code was adapted to
 * support P4D/PUD level virtual addresses. On the best configuration this
 * implementation provides on average 30,000 possible virtual addresses
 * for each memory region. An additional low memory page is used to ensure
 * each CPU can start with a PGD aligned virtual address (for realmode).
 *
 * The order of the memory regions is not changed. The feature looks at
 * the available space for the regions based on different configuration
 * options and randomizes the base and the space between each. The size of
 * the physical memory mapping is the available physical memory.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/memblock.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/kaslr.h>

#include "mm_internal.h"

#define TB_SHIFT 40

/*
 * The end address could depend on more configuration options to make the
 * highest amount of space for randomization available, but that's too hard
 * to keep straight and caused issues already.
 */
static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;

/*
 * Memory regions randomized by KASLR (except modules, which use separate
 * logic earlier during boot). The list is ordered based on virtual
 * addresses. This order is kept after randomization.
 */
static __initdata struct kaslr_memory_region {
	unsigned long *base;
	unsigned long size_tb;
} kaslr_regions[] = {
	{ &page_offset_base, 0 },
	{ &vmalloc_base, 0 },
	{ &vmemmap_base, 1 },
};
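
/*
 * The size of the physical mapping and vmalloc regions is left at 0 here;
 * kernel_randomize_memory() below fills it in at runtime based on the
 * physical address space and VMALLOC_SIZE_TB.
 */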

/* Get the size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
	return (region->size_tb << TB_SHIFT);
}
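
/*
 * For example, the vmemmap region above has size_tb == 1, so get_padding()
 * returns 1UL << 40 == 0x10000000000 bytes (one TiB).
 */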

/*
 * Apply no randomization if KASLR was disabled at boot or if KASAN
 * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
 */
static inline bool kaslr_memory_enabled(void)
{
	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}

/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
	size_t i;
	unsigned long vaddr_start, vaddr;
	unsigned long rand, memory_tb;
	struct rnd_state rand_state;
	unsigned long remain_entropy;

	vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
	vaddr = vaddr_start;

	/*
	 * These BUILD_BUG_ON checks ensure the memory layout is consistent
	 * with the vaddr_start/vaddr_end variables. These checks are very
	 * limited....
	 */
	BUILD_BUG_ON(vaddr_start >= vaddr_end);
	BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

	if (!kaslr_memory_enabled())
		return;

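	/*
	 * Size the physical mapping region to cover the maximum possible
	 * physical address space and the vmalloc region to VMALLOC_SIZE_TB;
	 * vmemmap keeps the 1 TB set in kaslr_regions[] above.
	 */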
	kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT);
	kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;

	/*
	 * Update the physical memory mapping size to the available memory
	 * and add padding if needed (especially for memory hotplug support).
	 */
	BUG_ON(kaslr_regions[0].base != &page_offset_base);
	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
		CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;

	/* Adapt the physical memory region size based on available memory */
	if (memory_tb < kaslr_regions[0].size_tb)
		kaslr_regions[0].size_tb = memory_tb;

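	/*
	 * Worked example (illustrative only): with 16 GiB of RAM,
	 * max_pfn << PAGE_SHIFT == 2^34 bytes, so the DIV_ROUND_UP() by
	 * 1 TB (2^40) yields 1; with CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING
	 * set to 10, memory_tb == 11, capping the physical mapping region at
	 * 11 TB when that is smaller than its maximum.
	 */
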
	/* Calculate the entropy available between regions */
	remain_entropy = vaddr_end - vaddr_start;
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
		remain_entropy -= get_padding(&kaslr_regions[i]);

	prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));

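	/*
	 * The remaining entropy is split evenly between the regions still
	 * to be placed: the first draw below can consume up to a third of
	 * it, the second up to half of what is then left, and the last all
	 * of the remainder. Illustrative example (made-up numbers): with
	 * ~90 TB of spare space, the first region can shift by up to 30 TB;
	 * whatever it does not consume is redistributed to the later regions.
	 */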
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
		unsigned long entropy;

		/*
		 * Select a random virtual address using the extra entropy
		 * available.
		 */
		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
		if (pgtable_l5_enabled())
			entropy = (rand % (entropy + 1)) & P4D_MASK;
		else
			entropy = (rand % (entropy + 1)) & PUD_MASK;
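		/*
		 * The mask above rounds the shift down to P4D/PUD alignment,
		 * so each randomized base stays aligned to the highest page
		 * table level below the PGD.
		 */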
		vaddr += entropy;
		*kaslr_regions[i].base = vaddr;

		/*
		 * Jump over the region and add a minimum padding based on
		 * the randomization alignment.
		 */
		vaddr += get_padding(&kaslr_regions[i]);
		if (pgtable_l5_enabled())
			vaddr = round_up(vaddr + 1, P4D_SIZE);
		else
			vaddr = round_up(vaddr + 1, PUD_SIZE);
		remain_entropy -= entropy;
	}
}
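
/*
 * Illustrative outcome on a 4-level paging boot (hypothetical, made-up
 * values that change on every boot):
 *
 *	page_offset_base == 0xffff9a3000000000
 *	vmalloc_base     == 0xffffb24000000000
 *	vmemmap_base     == 0xffffd5a800000000
 *
 * The relative order of the three regions is always preserved.
 */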
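
/*
 * After randomization, page_offset_base is generally not PGD aligned, so
 * the kernel's own PGD entry for the direct mapping cannot simply be
 * reused for the real-mode trampoline. Instead, copy the PUD entries
 * that map physical memory (from physical address 0) into one dedicated
 * low page and install that page as the trampoline PGD entry.
 */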
static void __meminit init_trampoline_pud(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	pud_t *pud_page, *pud_page_tramp;
	int i;

	pud_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	pud_page = (pud_t *) pgd_page_vaddr(*pgd);

	for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud, *pud_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		pud_tramp = pud_page_tramp + pud_index(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		*pud_tramp = *pud;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}
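
/*
 * Same as init_trampoline_pud() but for 5-level paging, where the copy
 * happens one level up, at the P4D level.
 */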
static void __meminit init_trampoline_p4d(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	p4d_t *p4d_page, *p4d_page_tramp;
	int i;

	p4d_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);

	for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
		p4d_t *p4d, *p4d_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		p4d_tramp = p4d_page_tramp + p4d_index(paddr);
		p4d = p4d_page + p4d_index(vaddr);
		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;

		*p4d_tramp = *p4d;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
}

/*
 * Create a PGD aligned trampoline table to allow real mode initialization
 * of additional CPUs. It consumes only one low memory page.
 */
void __meminit init_trampoline(void)
{
	if (!kaslr_memory_enabled()) {
		init_trampoline_default();
		return;
	}

	if (pgtable_l5_enabled())
		init_trampoline_p4d();
	else
		init_trampoline_pud();
}