/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include <asm/e820/api.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>
#include <asm/set_memory.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	*vaddr;
	pgd_t		*pgd;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	unsigned long	numpages;
	int		flags;
	unsigned long	pfn;
	unsigned	force_split		: 1,
			force_static_prot	: 1;
	int		curpage;
	struct page	**pages;
};
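
/*
 * Example (illustrative only): a set_memory_nx(addr, n) request arrives
 * here as a cpa_data with mask_set = __pgprot(_PAGE_NX), an empty
 * mask_clr, numpages = n and *vaddr = addr.
 */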

enum cpa_warn {
	CPA_CONFLICT,
	CPA_PROTECT,
	CPA_DETECT,
};

static const int cpa_warn_level = CPA_PROTECT;

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
 * using cpa_lock, so that we don't allow any other CPU, with stale large TLB
 * entries, to change the page attributes in parallel while some other CPU is
 * splitting a large page entry along with changing the attribute.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4
#define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	/* Protect against CPA */
	spin_lock(&pgd_lock);
	direct_pages_count[level] += pages;
	spin_unlock(&pgd_lock);
}

static void split_page_count(int level)
{
	if (direct_pages_count[level] == 0)
		return;

	direct_pages_count[level]--;
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

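/*
 * Report the direct-mapping footprint in kB. The counters hold numbers of
 * pages, so shift by log2(page size / 1 kB): 4k pages << 2, 2M pages << 11,
 * 4M pages << 12, 1G pages << 20.
 */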
void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	seq_printf(m, "DirectMap2M:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 11);
#else
	seq_printf(m, "DirectMap4M:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 12);
#endif
	if (direct_gbpages)
		seq_printf(m, "DirectMap1G:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_1G] << 20);
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_CPA_STATISTICS

static unsigned long cpa_1g_checked;
static unsigned long cpa_1g_sameprot;
static unsigned long cpa_1g_preserved;
static unsigned long cpa_2m_checked;
static unsigned long cpa_2m_sameprot;
static unsigned long cpa_2m_preserved;
static unsigned long cpa_4k_install;

static inline void cpa_inc_1g_checked(void)
{
	cpa_1g_checked++;
}

static inline void cpa_inc_2m_checked(void)
{
	cpa_2m_checked++;
}

static inline void cpa_inc_4k_install(void)
{
	cpa_4k_install++;
}

static inline void cpa_inc_lp_sameprot(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_sameprot++;
	else
		cpa_2m_sameprot++;
}

static inline void cpa_inc_lp_preserved(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_preserved++;
	else
		cpa_2m_preserved++;
}

static int cpastats_show(struct seq_file *m, void *p)
{
	seq_printf(m, "1G pages checked:     %16lu\n", cpa_1g_checked);
	seq_printf(m, "1G pages sameprot:    %16lu\n", cpa_1g_sameprot);
	seq_printf(m, "1G pages preserved:   %16lu\n", cpa_1g_preserved);
	seq_printf(m, "2M pages checked:     %16lu\n", cpa_2m_checked);
	seq_printf(m, "2M pages sameprot:    %16lu\n", cpa_2m_sameprot);
	seq_printf(m, "2M pages preserved:   %16lu\n", cpa_2m_preserved);
	seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install);
	return 0;
}

static int cpastats_open(struct inode *inode, struct file *file)
{
	return single_open(file, cpastats_show, NULL);
}

static const struct file_operations cpastats_fops = {
	.open		= cpastats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init cpa_stats_init(void)
{
	debugfs_create_file("cpa_stats", S_IRUSR, arch_debugfs_dir, NULL,
			    &cpastats_fops);
	return 0;
}
late_initcall(cpa_stats_init);
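/*
 * With debugfs mounted at its usual location, the counters above are
 * readable from /sys/kernel/debug/x86/cpa_stats.
 */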
#else
static inline void cpa_inc_1g_checked(void) { }
static inline void cpa_inc_2m_checked(void) { }
static inline void cpa_inc_4k_install(void) { }
static inline void cpa_inc_lp_sameprot(int level) { }
static inline void cpa_inc_lp_preserved(int level) { }
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static inline int
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr <= end;
}

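/*
 * Note the asymmetry: within() treats [start, end) as half-open while
 * within_inclusive() treats [start, end] as closed, e.g.
 * within(0x1000, 0, 0x1000) is false but within_inclusive(0x1000, 0, 0x1000)
 * is true.
 */
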
#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
	return __pa_symbol(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	/* Do not reference physical address outside the kernel. */
	return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
}

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/*
	 * Kernel text has an alias mapping at a high address, known
	 * here as "highmap".
	 */
	return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
}

#else

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/* There is no highmap on 32-bit */
	return false;
}

#endif

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflushopt is an unordered instruction which needs fencing with mfence or
 * sfence to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
	void *vend = vaddr + size;
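
	/*
	 * p is vaddr rounded down to a cache-line boundary, so a partial
	 * line at the start of the range is flushed as well; e.g. with
	 * 64-byte lines, vaddr 0x1078 yields p = 0x1040.
	 */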

	if (p >= vend)
		return;

	mb();

	for (; p < vend; p += clflush_size)
		clflushopt(p);

	mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);

void arch_invalidate_pmem(void *addr, size_t size)
{
	clflush_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);

static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86 >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static bool __cpa_flush_range(unsigned long start, int numpages, int cache)
{
	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	WARN_ON(PAGE_ALIGN(start) != start);

	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		cpa_flush_all(cache);
		return true;
	}

	flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);

	return !cache;
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	if (__cpa_flush_range(start, numpages, cache))
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}

static void cpa_flush_array(unsigned long baddr, unsigned long *start,
			    int numpages, int cache,
			    int in_flags, struct page **pages)
{
	unsigned int i, level;

	if (__cpa_flush_range(baddr, numpages, cache))
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0; i < numpages; i++) {
		unsigned long addr;
		pte_t *pte;

		if (in_flags & CPA_PAGES_ARRAY)
			addr = (unsigned long)page_address(pages[i]);
		else
			addr = start[i];

		pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *)addr, PAGE_SIZE);
	}
}

static bool overlaps(unsigned long r1_start, unsigned long r1_end,
		     unsigned long r2_start, unsigned long r2_end)
{
	return (r1_start <= r2_end && r1_end >= r2_start) ||
	       (r2_start <= r1_end && r2_end >= r1_start);
}
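
/*
 * Note: both range ends are inclusive here, e.g. overlaps(0x0, 0xfff,
 * 0xfff, 0x1fff) is true because the two ranges share their boundary byte.
 */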

#ifdef CONFIG_PCI_BIOS
/*
 * The BIOS area between 640k and 1Mb needs to be executable for PCI BIOS
 * based config access (CONFIG_PCI_GOBIOS) support.
 */
#define BIOS_PFN	PFN_DOWN(BIOS_BEGIN)
#define BIOS_PFN_END	PFN_DOWN(BIOS_END - 1)

static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	if (pcibios_enabled && overlaps(spfn, epfn, BIOS_PFN, BIOS_PFN_END))
		return _PAGE_NX;
	return 0;
}
#else
static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	return 0;
}
#endif

/*
 * The .rodata section needs to be read-only. Using the pfn catches all
 * aliases. This also includes __ro_after_init, so do not enforce until
 * kernel_set_to_readonly is true.
 */
static pgprotval_t protect_rodata(unsigned long spfn, unsigned long epfn)
{
	unsigned long epfn_ro, spfn_ro = PFN_DOWN(__pa_symbol(__start_rodata));

	/*
	 * Note: __end_rodata is page aligned and not inclusive, so
	 * subtract 1 to get the last enforced PFN in the rodata area.
	 */
	epfn_ro = PFN_DOWN(__pa_symbol(__end_rodata)) - 1;

	if (kernel_set_to_readonly && overlaps(spfn, epfn, spfn_ro, epfn_ro))
		return _PAGE_RW;
	return 0;
}

/*
 * Protect kernel text against becoming non executable by forbidding
 * _PAGE_NX. This protects only the high kernel mapping (_text -> _etext)
 * out of which the kernel actually executes. Do not protect the low
 * mapping.
 *
 * This does not cover __inittext since that is gone after boot.
 */
static pgprotval_t protect_kernel_text(unsigned long start, unsigned long end)
{
	unsigned long t_end = (unsigned long)_etext - 1;
	unsigned long t_start = (unsigned long)_text;

	if (overlaps(start, end, t_start, t_end))
		return _PAGE_NX;
	return 0;
}

#if defined(CONFIG_X86_64)
/*
 * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
 * kernel text mappings for the large page aligned text and rodata sections
 * will always be read-only. The kernel identity mappings covering the holes
 * caused by this alignment can be anything the user asks for.
 *
 * This will preserve the large page mappings for kernel text/data at no
 * extra cost.
 */
static pgprotval_t protect_kernel_text_ro(unsigned long start,
					  unsigned long end)
{
	unsigned long t_end = (unsigned long)__end_rodata_hpage_align - 1;
	unsigned long t_start = (unsigned long)_text;
	unsigned int level;

	if (!kernel_set_to_readonly || !overlaps(start, end, t_start, t_end))
		return 0;
	/*
	 * Don't enforce the !RW mapping for the kernel text mapping, if
	 * the current mapping is already using small page mapping. No
	 * need to work hard to preserve large page mappings in this case.
	 *
	 * This also fixes the Linux Xen paravirt guest boot failure caused
	 * by unexpected read-only mappings for kernel identity
	 * mappings. In this paravirt guest case, the kernel text mapping
	 * and the kernel identity mapping share the same page-table pages,
	 * so the protections for kernel text and identity mappings have to
	 * be the same.
	 */
	if (lookup_address(start, &level) && (level != PG_LEVEL_4K))
		return _PAGE_RW;
	return 0;
}
#else
static pgprotval_t protect_kernel_text_ro(unsigned long start,
					  unsigned long end)
{
	return 0;
}
#endif

static inline bool conflicts(pgprot_t prot, pgprotval_t val)
{
	return (pgprot_val(prot) & ~val) != pgprot_val(prot);
}

static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
				  unsigned long start, unsigned long end,
				  unsigned long pfn, const char *txt)
{
	static const char *lvltxt[] = {
		[CPA_CONFLICT]	= "conflict",
		[CPA_PROTECT]	= "protect",
		[CPA_DETECT]	= "detect",
	};

	if (warnlvl > cpa_warn_level || !conflicts(prot, val))
		return;

	pr_warn("CPA %8s %10s: 0x%016lx - 0x%016lx PFN %lx req %016llx prevent %016llx\n",
		lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot),
		(unsigned long long)val);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
					  unsigned long pfn, unsigned long npg,
					  int warnlvl)
{
	pgprotval_t forbidden, res;
	unsigned long end;

	/*
	 * There is no point in checking RW/NX conflicts when the requested
	 * mapping is setting the page !PRESENT.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		return prot;

	/* Operate on the virtual address */
	end = start + npg * PAGE_SIZE - 1;

	res = protect_kernel_text(start, end);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
	forbidden = res;

	res = protect_kernel_text_ro(start, end);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
	forbidden |= res;

	/* Check the PFN directly */
	res = protect_pci_bios(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX");
	forbidden |= res;

	res = protect_rodata(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO");
	forbidden |= res;

	return __pgprot(pgprot_val(prot) & ~forbidden);
}

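/*
 * Example (illustrative): a request to set _PAGE_NX on a range overlapping
 * the high kernel text mapping has _PAGE_NX stripped above, so the text
 * stays executable no matter what the caller asked for.
 */
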
/*
 * Lookup the page table entry for a virtual address in a specific pgd.
 * Return a pointer to the entry and the level of the mapping.
 */
pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
			     unsigned int *level)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return NULL;

	*level = PG_LEVEL_512G;
	if (p4d_large(*p4d) || !p4d_present(*p4d))
		return (pte_t *)p4d;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	return lookup_address_in_pgd(pgd_offset_k(address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address);

static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
				  unsigned int *level)
{
	if (cpa->pgd)
		return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
					     address, level);

	return lookup_address(address, level);
}

/*
 * Lookup the PMD entry for a virtual address. Return a pointer to the entry
 * or NULL if not present.
 */
pmd_t *lookup_pmd_address(unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
		return NULL;

	return pmd_offset(pud, address);
}

/*
 * This is necessary because __pa() does not work on some
 * kinds of memory, like vmalloc() or the alloc_remap()
 * areas on 32-bit NUMA systems. The percpu areas can
 * end up in this kind of memory, for instance.
 *
 * This could be optimized, but it is only intended to be
 * used at initialization time, and keeping it
 * unoptimized should increase the testing coverage for
 * the more obscure platforms.
 */
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
	unsigned long virt_addr = (unsigned long)__virt_addr;
	phys_addr_t phys_addr;
	unsigned long offset;
	enum pg_level level;
	pte_t *pte;

	pte = lookup_address(virt_addr, &level);
	BUG_ON(!pte);

	/*
	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
	 * make 32-PAE kernel work correctly.
	 */
	switch (level) {
	case PG_LEVEL_1G:
		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PUD_PAGE_MASK;
		break;
	case PG_LEVEL_2M:
		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PMD_PAGE_MASK;
		break;
	default:
		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
		offset = virt_addr & ~PAGE_MASK;
	}

	return (phys_addr_t)(phys_addr | offset);
}
EXPORT_SYMBOL_GPL(slow_virt_to_phys);

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			p4d = p4d_offset(pgd, address);
			pud = pud_offset(p4d, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot)
{
	/*
	 * _PAGE_GLOBAL means "global page" for present PTEs.
	 * But, it is also used to indicate _PAGE_PROTNONE
	 * for non-present PTEs.
	 *
	 * This ensures that a _PAGE_GLOBAL PTE going from
	 * present to non-present is not confused as
	 * _PAGE_PROTNONE.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		pgprot_val(prot) &= ~_PAGE_GLOBAL;

	return prot;
}

static int __should_split_large_page(pte_t *kpte, unsigned long address,
				     struct cpa_data *cpa)
{
	unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn;
	pgprot_t old_prot, new_prot, req_prot, chk_prot;
	pte_t new_pte, old_pte, *tmp;
	enum pg_level level;

	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = _lookup_address_cpa(cpa, address, &level);
	if (tmp != kpte)
		return 1;

	switch (level) {
	case PG_LEVEL_2M:
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		old_pfn = pmd_pfn(*(pmd_t *)kpte);
		cpa_inc_2m_checked();
		break;
	case PG_LEVEL_1G:
		old_prot = pud_pgprot(*(pud_t *)kpte);
		old_pfn = pud_pfn(*(pud_t *)kpte);
		cpa_inc_1g_checked();
		break;
	default:
		return -EINVAL;
	}

	psize = page_level_size(level);
	pmask = page_level_mask(level);

	/*
	 * Calculate the number of pages, which fit into this large
	 * page starting at address:
	 */
	lpaddr = (address + psize) & pmask;
	numpages = (lpaddr - address) >> PAGE_SHIFT;
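	/*
	 * E.g. for a 2M page and an address 8k past a 2M boundary, lpaddr
	 * is the next 2M boundary and numpages = (2M - 8k) / 4k = 510.
	 */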
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 * Convert protection attributes to 4k-format, as cpa->mask* are set
	 * up accordingly.
	 */
	old_pte = *kpte;
	/* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */
	req_prot = pgprot_large_2_4k(old_prot);

	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * req_prot is in format of 4k pages. It must be converted to large
	 * page format: the caching mode includes the PAT bit located at
	 * different bit positions in the two formats.
	 */
	req_prot = pgprot_4k_2_large(req_prot);
	req_prot = pgprot_clear_protnone_bits(req_prot);
	if (pgprot_val(req_prot) & _PAGE_PRESENT)
		pgprot_val(req_prot) |= _PAGE_PSE;

	/*
	 * old_pfn points to the large page base pfn. So we need to add the
	 * offset of the virtual address:
	 */
	pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
	cpa->pfn = pfn;

	/*
	 * Calculate the large page base address and the number of 4K pages
	 * in the large page
	 */
	lpaddr = address & pmask;
	numpages = psize >> PAGE_SHIFT;

	/*
	 * Sanity check that the existing mapping is correct versus the static
	 * protections. static_protections() guards against !PRESENT, so no
	 * extra conditional required here.
	 */
	chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
				      CPA_CONFLICT);

	if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
		/*
		 * Split the large page and tell the split code to
		 * enforce static protections.
		 */
		cpa->force_static_prot = 1;
		return 1;
	}

	/*
	 * Optimization: If the requested pgprot is the same as the current
	 * pgprot, then the large page can be preserved and no updates are
	 * required independent of alignment and length of the requested
	 * range. The above already established that the current pgprot is
	 * correct, which in consequence makes the requested pgprot correct
	 * as well if it is the same. The static protection scan below will
	 * not come to a different conclusion.
	 */
	if (pgprot_val(req_prot) == pgprot_val(old_prot)) {
		cpa_inc_lp_sameprot(level);
		return 0;
	}

	/*
	 * If the requested range does not cover the full page, split it up
	 */
	if (address != lpaddr || cpa->numpages != numpages)
		return 1;

	/*
	 * Check whether the requested pgprot is conflicting with a static
	 * protection requirement in the large page.
	 */
	new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
				      CPA_DETECT);

	/*
	 * If there is a conflict, split the large page.
	 *
	 * There used to be a 4k wise evaluation trying really hard to
	 * preserve the large pages, but experimentation has shown, that this
	 * does not help at all. There might be corner cases which would
	 * preserve one large page occasionally, but it's really not worth the
	 * extra code and cycles for the common case.
	 */
	if (pgprot_val(req_prot) != pgprot_val(new_prot))
		return 1;

	/* All checks passed. Update the large page mapping. */
	new_pte = pfn_pte(old_pfn, new_prot);
	__set_pmd_pte(kpte, address, new_pte);
	cpa->flags |= CPA_FLUSHTLB;
	cpa_inc_lp_preserved(level);
	return 0;
}

static int should_split_large_page(pte_t *kpte, unsigned long address,
				   struct cpa_data *cpa)
{
	int do_split;

	if (cpa->force_split)
		return 1;

	spin_lock(&pgd_lock);
	do_split = __should_split_large_page(kpte, address, cpa);
	spin_unlock(&pgd_lock);

	return do_split;
}

static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
			  pgprot_t ref_prot, unsigned long address,
			  unsigned long size)
{
	unsigned int npg = PFN_DOWN(size);
	pgprot_t prot;

	/*
	 * If should_split_large_page() discovered an inconsistent mapping,
	 * remove the invalid protection in the split mapping.
	 */
	if (!cpa->force_static_prot)
		goto set;

	prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT);

	if (pgprot_val(prot) == pgprot_val(ref_prot))
		goto set;

	/*
	 * If this is splitting a PMD, fix it up. PUD splits cannot be
	 * fixed trivially as that would require to rescan the newly
	 * installed PMD mappings after returning from split_large_page()
	 * so an eventual further split can allocate the necessary PTE
	 * pages. Warn for now and revisit it in case this actually
	 * happens.
	 */
	if (size == PAGE_SIZE)
		ref_prot = prot;
	else
		pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
set:
	set_pte(pte, pfn_pte(pfn, ref_prot));
}

static int
__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
		   struct page *base)
{
	unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
	pte_t *pbase = (pte_t *)page_address(base);
	unsigned int i, level;
	pgprot_t ref_prot;
	pte_t *tmp;

	spin_lock(&pgd_lock);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = _lookup_address_cpa(cpa, address, &level);
	if (tmp != kpte) {
		spin_unlock(&pgd_lock);
		return 1;
	}

	paravirt_alloc_pte(&init_mm, page_to_pfn(base));

	switch (level) {
	case PG_LEVEL_2M:
		ref_prot = pmd_pgprot(*(pmd_t *)kpte);
		/*
		 * Clear PSE (aka _PAGE_PAT) and move
		 * PAT bit to correct position.
		 */
		ref_prot = pgprot_large_2_4k(ref_prot);
		ref_pfn = pmd_pfn(*(pmd_t *)kpte);
		lpaddr = address & PMD_MASK;
		lpinc = PAGE_SIZE;
		break;

	case PG_LEVEL_1G:
		ref_prot = pud_pgprot(*(pud_t *)kpte);
		ref_pfn = pud_pfn(*(pud_t *)kpte);
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
		lpaddr = address & PUD_MASK;
		lpinc = PMD_SIZE;
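		/*
		 * A 1G split installs 512 2M entries: each loop iteration
		 * below then advances the pfn by 512 (PMD_PAGE_SIZE >>
		 * PAGE_SHIFT) and the address by PMD_SIZE.
		 */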
		/*
		 * Clear the PSE flags if the PRESENT flag is not set
		 * otherwise pmd_present/pmd_huge will return true
		 * even on a non present pmd.
		 */
		if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
			pgprot_val(ref_prot) &= ~_PAGE_PSE;
		break;

	default:
		spin_unlock(&pgd_lock);
		return 1;
	}

	ref_prot = pgprot_clear_protnone_bits(ref_prot);

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = ref_pfn;
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
		split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);

	if (virt_addr_valid(address)) {
		unsigned long pfn = PFN_DOWN(__pa(address));

		if (pfn_range_is_mapped(pfn, pfn + 1))
			split_page_count(level);
	}

	/*
	 * Install the new, split up pagetable.
	 *
	 * We use the standard kernel pagetable protections for the new
	 * pagetable protections, the actual ptes set above control the
	 * primary protection behavior:
	 */
	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

	/*
	 * Do a global flush tlb after splitting the large page
	 * and before we do the actual change page attribute in the PTE.
	 *
	 * Without this, we violate the TLB application note, that says:
	 * "The TLBs may contain both ordinary and large-page
	 *  translations for a 4-KByte range of linear addresses. This
	 *  may occur if software modifies the paging structures so that
	 *  the page size used for the address range changes. If the two
	 *  translations differ with respect to page frame or attributes
	 *  (e.g., permissions), processor behavior is undefined and may
	 *  be implementation-specific."
	 *
	 * We do this global tlb flush inside the cpa_lock, so that we
	 * don't allow any other cpu, with stale tlb entries, to change
	 * the page attribute in parallel for an address that also falls
	 * into the just split large page entry.
	 */
	flush_tlb_all();
	spin_unlock(&pgd_lock);

	return 0;
}

static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
			    unsigned long address)
{
	struct page *base;

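	/*
	 * cpa_lock is held only in the !DEBUG_PAGEALLOC case (see the
	 * comment at its definition); GFP_KERNEL may sleep, so the lock
	 * is dropped across the allocation below.
	 */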
	if (!debug_pagealloc_enabled())
		spin_unlock(&cpa_lock);
	base = alloc_pages(GFP_KERNEL, 0);
	if (!debug_pagealloc_enabled())
		spin_lock(&cpa_lock);
	if (!base)
		return -ENOMEM;

	if (__split_large_page(cpa, kpte, address, base))
		__free_page(base);

	return 0;
}

static bool try_to_free_pte_page(pte_t *pte)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++)
		if (!pte_none(pte[i]))
			return false;

	free_page((unsigned long)pte);
	return true;
}

static bool try_to_free_pmd_page(pmd_t *pmd)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++)
		if (!pmd_none(pmd[i]))
			return false;

	free_page((unsigned long)pmd);
	return true;
}

static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, start);

	while (start < end) {
		set_pte(pte, __pte(0));

		start += PAGE_SIZE;
		pte++;
	}

	if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
		pmd_clear(pmd);
		return true;
	}
	return false;
}

static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
			      unsigned long start, unsigned long end)
{
	if (unmap_pte_range(pmd, start, end))
		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
			pud_clear(pud);
}

static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, start);

	/*
	 * Not on a 2MB page boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		__unmap_pmd_range(pud, pmd, start, pre_end);

		start = pre_end;
		pmd++;
	}

	/*
	 * Try to unmap in 2M chunks.
	 */
	while (end - start >= PMD_SIZE) {
		if (pmd_large(*pmd))
			pmd_clear(pmd);
		else
			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);

		start += PMD_SIZE;
		pmd++;
	}

	/*
	 * 4K leftovers?
	 */
	if (start < end)
		return __unmap_pmd_range(pud, pmd, start, end);

	/*
	 * Try again to free the PMD page if haven't succeeded above.
	 */
	if (!pud_none(*pud))
		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
			pud_clear(pud);
}

static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
{
	pud_t *pud = pud_offset(p4d, start);

	/*
	 * Not on a GB page boundary?
	 */
	if (start & (PUD_SIZE - 1)) {
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		unmap_pmd_range(pud, start, pre_end);

		start = pre_end;
		pud++;
	}

	/*
	 * Try to unmap in 1G chunks?
	 */
	while (end - start >= PUD_SIZE) {

		if (pud_large(*pud))
			pud_clear(pud);
		else
			unmap_pmd_range(pud, start, start + PUD_SIZE);

		start += PUD_SIZE;
		pud++;
	}

	/*
	 * 2M leftovers?
	 */
	if (start < end)
		unmap_pmd_range(pud, start, end);

	/*
	 * No need to try to free the PUD page because we'll free it in
	 * populate_pgd's error path
	 */
}

static int alloc_pte_page(pmd_t *pmd)
{
	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
	if (!pte)
		return -1;

	set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	return 0;
}

static int alloc_pmd_page(pud_t *pud)
{
	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
	if (!pmd)
		return -1;

	set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	return 0;
}

static void populate_pte(struct cpa_data *cpa,
			 unsigned long start, unsigned long end,
			 unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, start);

	pgprot = pgprot_clear_protnone_bits(pgprot);

	while (num_pages-- && start < end) {
		set_pte(pte, pfn_pte(cpa->pfn, pgprot));

		start += PAGE_SIZE;
		cpa->pfn++;
		pte++;
	}
}

static long populate_pmd(struct cpa_data *cpa,
			 unsigned long start, unsigned long end,
			 unsigned num_pages, pud_t *pud, pgprot_t pgprot)
{
	long cur_pages = 0;
	pmd_t *pmd;
	pgprot_t pmd_pgprot;

	/*
	 * Not on a 2M boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;

		pre_end   = min_t(unsigned long, pre_end, next_page);
		cur_pages = (pre_end - start) >> PAGE_SHIFT;
		cur_pages = min_t(unsigned int, num_pages, cur_pages);

		/*
		 * Need a PTE page?
		 */
		pmd = pmd_offset(pud, start);
		if (pmd_none(*pmd))
			if (alloc_pte_page(pmd))
				return -1;

		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);

		start = pre_end;
	}

	/*
	 * We mapped them all?
	 */
	if (num_pages == cur_pages)
		return cur_pages;

	pmd_pgprot = pgprot_4k_2_large(pgprot);

	while (end - start >= PMD_SIZE) {

		/*
		 * We cannot use a 1G page so allocate a PMD page if needed.
		 */
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		pmd = pmd_offset(pud, start);

		set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
					canon_pgprot(pmd_pgprot))));

		start	  += PMD_SIZE;
		cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
		cur_pages += PMD_SIZE >> PAGE_SHIFT;
	}

	/*
	 * Map trailing 4K pages.
	 */
	if (start < end) {
		pmd = pmd_offset(pud, start);
		if (pmd_none(*pmd))
			if (alloc_pte_page(pmd))
				return -1;

		populate_pte(cpa, start, end, num_pages - cur_pages,
			     pmd, pgprot);
	}
	return num_pages;
}

static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
			pgprot_t pgprot)
{
	pud_t *pud;
	unsigned long end;
	long cur_pages = 0;
	pgprot_t pud_pgprot;

	end = start + (cpa->numpages << PAGE_SHIFT);

	/*
	 * Not on a Gb page boundary? => map everything up to it with
	 * smaller pages.
	 */
	if (start & (PUD_SIZE - 1)) {
		unsigned long pre_end;
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;

		pre_end   = min_t(unsigned long, end, next_page);
		cur_pages = (pre_end - start) >> PAGE_SHIFT;
		cur_pages = min_t(int, (int)cpa->numpages, cur_pages);

		pud = pud_offset(p4d, start);

		/*
		 * Need a PMD page?
		 */
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
					 pud, pgprot);
		if (cur_pages < 0)
			return cur_pages;

		start = pre_end;
	}

	/* We mapped them all? */
	if (cpa->numpages == cur_pages)
		return cur_pages;

	pud = pud_offset(p4d, start);
	pud_pgprot = pgprot_4k_2_large(pgprot);

	/*
	 * Map everything starting from the Gb boundary, possibly with 1G pages
	 */
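	/*
	 * 1G entries are only installed when the CPU advertises GBPAGES;
	 * otherwise the loop body never runs and the remainder is mapped
	 * with 2M/4K pages by populate_pmd() below.
	 */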
	while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
		set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
				   canon_pgprot(pud_pgprot))));

		start	  += PUD_SIZE;
		cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
		cur_pages += PUD_SIZE >> PAGE_SHIFT;
		pud++;
	}

	/* Map trailing leftover */
	if (start < end) {
		long tmp;

		pud = pud_offset(p4d, start);
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
				   pud, pgprot);
		if (tmp < 0)
			return cur_pages;

		cur_pages += tmp;
	}
	return cur_pages;
}

/*
 * Restrictions for kernel page table do not necessarily apply when mapping in
 * an alternate PGD.
 */
static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
{
	pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
	pud_t *pud = NULL;	/* shut up gcc */
	p4d_t *p4d;
	pgd_t *pgd_entry;
	long ret;

	pgd_entry = cpa->pgd + pgd_index(addr);

	if (pgd_none(*pgd_entry)) {
		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
		if (!p4d)
			return -1;

		set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE));
	}

	/*
	 * Allocate a PUD page and hand it down for mapping.
	 */
	p4d = p4d_offset(pgd_entry, addr);
	if (p4d_none(*p4d)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			return -1;

		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
	}

	pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);

	ret = populate_pud(cpa, addr, p4d, pgprot);
	if (ret < 0) {
		/*
		 * Leave the PUD page in place in case some other CPU or thread
		 * already found it, but remove any useless entries we just
		 * added to it.
		 */
		unmap_pud_range(p4d, addr,
				addr + (cpa->numpages << PAGE_SHIFT));
		return ret;
	}

	cpa->numpages = ret;
	return 0;
}

static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
			       int primary)
{
	if (cpa->pgd) {
		/*
		 * Right now, we only execute this code path when mapping
		 * the EFI virtual memory map regions, no other users
		 * provide a ->pgd value. This may change in the future.
		 */
		return populate_pgd(cpa, vaddr);
	}

	/*
	 * Ignore all non primary paths.
	 */
	if (!primary) {
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
	 * to have holes.
	 * Also set numpages to '1' indicating that we processed cpa req for
	 * one virtual address page and its pfn. TBD: numpages can be set based
	 * on the initial value and the level returned by lookup_address().
	 */
	if (within(vaddr, PAGE_OFFSET,
		   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
		cpa->numpages = 1;
		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
		return 0;

	} else if (__cpa_pfn_in_highmap(cpa->pfn)) {
		/* Faults in the highmap are OK, so do not warn: */
		return -EFAULT;
	} else {
		WARN(1, KERN_WARNING "CPA: called for zero pte. "
			"vaddr = %lx cpa->vaddr = %lx\n", vaddr,
			*cpa->vaddr);

		return -EFAULT;
	}
}

c31c7d48 | 1464 | static int __change_page_attr(struct cpa_data *cpa, int primary) |
9f4c815c | 1465 | { |
d75586ad | 1466 | unsigned long address; |
da7bfc50 HH |
1467 | int do_split, err; |
1468 | unsigned int level; | |
c31c7d48 | 1469 | pte_t *kpte, old_pte; |
1da177e4 | 1470 | |
8523acfe TH |
1471 | if (cpa->flags & CPA_PAGES_ARRAY) { |
1472 | struct page *page = cpa->pages[cpa->curpage]; | |
1473 | if (unlikely(PageHighMem(page))) | |
1474 | return 0; | |
1475 | address = (unsigned long)page_address(page); | |
1476 | } else if (cpa->flags & CPA_ARRAY) | |
d75586ad SL |
1477 | address = cpa->vaddr[cpa->curpage]; |
1478 | else | |
1479 | address = *cpa->vaddr; | |
97f99fed | 1480 | repeat: |
82f0712c | 1481 | kpte = _lookup_address_cpa(cpa, address, &level); |
1da177e4 | 1482 | if (!kpte) |
a1e46212 | 1483 | return __cpa_process_fault(cpa, address, primary); |
c31c7d48 TG |
1484 | |
1485 | old_pte = *kpte; | |
dcb32d99 | 1486 | if (pte_none(old_pte)) |
a1e46212 | 1487 | return __cpa_process_fault(cpa, address, primary); |
9f4c815c | 1488 | |
30551bb3 | 1489 | if (level == PG_LEVEL_4K) { |
c31c7d48 | 1490 | pte_t new_pte; |
626c2c9d | 1491 | pgprot_t new_prot = pte_pgprot(old_pte); |
c31c7d48 | 1492 | unsigned long pfn = pte_pfn(old_pte); |
86f03989 | 1493 | |
72e458df TG |
1494 | pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr); |
1495 | pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); | |
86f03989 | 1496 | |
5c280cf6 | 1497 | cpa_inc_4k_install(); |
4046460b TG |
1498 | new_prot = static_protections(new_prot, address, pfn, 1, |
1499 | CPA_PROTECT); | |
86f03989 | 1500 | |
d1440b23 | 1501 | new_prot = pgprot_clear_protnone_bits(new_prot); |
a8aed3e0 | 1502 | |
626c2c9d AV |
1503 | /* |
1504 | * We need to keep the pfn from the existing PTE, | |
1505 | * after all we're only going to change it's attributes | |
1506 | * not the memory it points to | |
1507 | */ | |
1a54420a | 1508 | new_pte = pfn_pte(pfn, new_prot); |
c31c7d48 | 1509 | cpa->pfn = pfn; |
f4ae5da0 TG |
1510 | /* |
1511 | * Do we really change anything ? | |
1512 | */ | |
1513 | if (pte_val(old_pte) != pte_val(new_pte)) { | |
1514 | set_pte_atomic(kpte, new_pte); | |
d75586ad | 1515 | cpa->flags |= CPA_FLUSHTLB; |
f4ae5da0 | 1516 | } |
9b5cf48b | 1517 | cpa->numpages = 1; |
65e074df | 1518 | return 0; |
1da177e4 | 1519 | } |
65e074df TG |
1520 | |
1521 | /* | |
1522 | * Check, whether we can keep the large page intact | |
1523 | * and just change the pte: | |
1524 | */ | |
8679de09 | 1525 | do_split = should_split_large_page(kpte, address, cpa); |
65e074df TG |
1526 | /* |
1527 | * When the range fits into the existing large page, | |
9b5cf48b | 1528 | * return. cpa->numpages and the CPA_FLUSHTLB flag have been | |
65e074df TG |
1529 | * updated in should_split_large_page(): | |
1530 | */ | |
87f7f8fe IM |
1531 | if (do_split <= 0) |
1532 | return do_split; | |
65e074df TG |
1533 | |
1534 | /* | |
1535 | * We have to split the large page: | |
1536 | */ | |
82f0712c | 1537 | err = split_large_page(cpa, kpte, address); |
c0a759ab | 1538 | if (!err) |
87f7f8fe | 1539 | goto repeat; |
beaff633 | 1540 | |
87f7f8fe | 1541 | return err; |
9f4c815c | 1542 | } |
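/*
 * Editorial sketch, not part of this file: the clear-then-set mask
 * application performed on new_prot in the 4K path above. Standalone,
 * using the real x86 bit positions for _PAGE_RW and _PAGE_NX.
 */
#include <stdio.h>

#define _PAGE_RW        (1UL << 1)
#define _PAGE_NX        (1UL << 63)

int main(void)
{
        unsigned long prot = _PAGE_RW;          /* RW and executable */
        unsigned long mask_clr = _PAGE_RW;      /* request: drop write ... */
        unsigned long mask_set = _PAGE_NX;      /* ... and drop execute */

        prot &= ~mask_clr;                      /* clear bits first */
        prot |= mask_set;                       /* then set bits */

        printf("%#lx\n", prot);                 /* 0x8000000000000000: RO + NX */
        return 0;
}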
1da177e4 | 1543 | |
c31c7d48 TG |
1544 | static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias); |
1545 | ||
1546 | static int cpa_process_alias(struct cpa_data *cpa) | |
1da177e4 | 1547 | { |
c31c7d48 | 1548 | struct cpa_data alias_cpa; |
992f4c1c | 1549 | unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT); |
e933a73f | 1550 | unsigned long vaddr; |
992f4c1c | 1551 | int ret; |
44af6c41 | 1552 | |
8eb5779f | 1553 | if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1)) |
c31c7d48 | 1554 | return 0; |
626c2c9d | 1555 | |
f34b439f TG |
1556 | /* |
1557 | * No need to redo when the primary call already touched the | |
1558 | * direct mapping: | |
1559 | */ | |
8523acfe TH |
1560 | if (cpa->flags & CPA_PAGES_ARRAY) { |
1561 | struct page *page = cpa->pages[cpa->curpage]; | |
1562 | if (unlikely(PageHighMem(page))) | |
1563 | return 0; | |
1564 | vaddr = (unsigned long)page_address(page); | |
1565 | } else if (cpa->flags & CPA_ARRAY) | |
d75586ad SL |
1566 | vaddr = cpa->vaddr[cpa->curpage]; |
1567 | else | |
1568 | vaddr = *cpa->vaddr; | |
1569 | ||
1570 | if (!(within(vaddr, PAGE_OFFSET, | |
a1e46212 | 1571 | PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) { |
44af6c41 | 1572 | |
f34b439f | 1573 | alias_cpa = *cpa; |
992f4c1c | 1574 | alias_cpa.vaddr = &laddr; |
9ae28475 | 1575 | alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); |
d75586ad | 1576 | |
f34b439f | 1577 | ret = __change_page_attr_set_clr(&alias_cpa, 0); |
992f4c1c TH |
1578 | if (ret) |
1579 | return ret; | |
f34b439f | 1580 | } |
44af6c41 | 1581 | |
44af6c41 | 1582 | #ifdef CONFIG_X86_64 |
488fd995 | 1583 | /* |
992f4c1c TH |
1584 | * If the primary call didn't touch the high mapping already |
1585 | * and the physical address is inside the kernel map, we need | |
0879750f | 1586 | * to touch the high mapped kernel as well: |
488fd995 | 1587 | */ |
992f4c1c | 1588 | if (!within(vaddr, (unsigned long)_text, _brk_end) && |
58e65b51 | 1589 | __cpa_pfn_in_highmap(cpa->pfn)) { |
992f4c1c TH |
1590 | unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + |
1591 | __START_KERNEL_map - phys_base; | |
1592 | alias_cpa = *cpa; | |
1593 | alias_cpa.vaddr = &temp_cpa_vaddr; | |
1594 | alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); | |
c31c7d48 | 1595 | |
992f4c1c TH |
1596 | /* |
1597 | * The high mapping range is imprecise, so ignore the | |
1598 | * return value. | |
1599 | */ | |
1600 | __change_page_attr_set_clr(&alias_cpa, 0); | |
1601 | } | |
488fd995 | 1602 | #endif |
992f4c1c TH |
1603 | |
1604 | return 0; | |
1da177e4 LT |
1605 | } |
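/*
 * Editorial sketch, not part of this file: the two kernel aliases the
 * function above walks. A pfn inside the kernel image is reachable
 * through the direct map and through the high kernel map. The constants
 * are example values; the real ones depend on KASLR and paging mode.
 * Compiles standalone in userspace.
 */
#include <stdio.h>

#define PAGE_SHIFT              12
#define PAGE_OFFSET             0xffff888000000000UL    /* direct map base */
#define __START_KERNEL_map      0xffffffff80000000UL    /* high kernel map */

int main(void)
{
        unsigned long phys_base = 0x1000000;    /* kernel loaded at 16 MB */
        unsigned long pfn = 0x1234;             /* some pfn inside the image */

        unsigned long direct = PAGE_OFFSET + (pfn << PAGE_SHIFT);
        unsigned long high = (pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;

        printf("direct map alias: %#lx\n", direct);     /* 0xffff888001234000 */
        printf("high map alias:   %#lx\n", high);       /* 0xffffffff80234000 */
        return 0;
}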
1606 | ||
c31c7d48 | 1607 | static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) |
ff31452b | 1608 | { |
e535ec08 MF |
1609 | unsigned long numpages = cpa->numpages; |
1610 | int ret; | |
ff31452b | 1611 | |
65e074df TG |
1612 | while (numpages) { |
1613 | /* | |
1614 | * Store the remaining nr of pages for the large page | |
1615 | * preservation check. | |
1616 | */ | |
9b5cf48b | 1617 | cpa->numpages = numpages; |
d75586ad | 1618 | /* for array changes, we can't use large page */ |
9ae28475 | 1619 | if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY)) |
d75586ad | 1620 | cpa->numpages = 1; |
c31c7d48 | 1621 | |
288cf3c6 | 1622 | if (!debug_pagealloc_enabled()) |
ad5ca55f | 1623 | spin_lock(&cpa_lock); |
c31c7d48 | 1624 | ret = __change_page_attr(cpa, checkalias); |
288cf3c6 | 1625 | if (!debug_pagealloc_enabled()) |
ad5ca55f | 1626 | spin_unlock(&cpa_lock); |
ff31452b TG |
1627 | if (ret) |
1628 | return ret; | |
ff31452b | 1629 | |
c31c7d48 TG |
1630 | if (checkalias) { |
1631 | ret = cpa_process_alias(cpa); | |
1632 | if (ret) | |
1633 | return ret; | |
1634 | } | |
1635 | ||
65e074df TG |
1636 | /* |
1637 | * Adjust the number of pages with the result of the | |
1638 | * CPA operation. Either a large page has been | |
1639 | * preserved or a single page update happened. | |
1640 | */ | |
74256377 | 1641 | BUG_ON(cpa->numpages > numpages || !cpa->numpages); |
9b5cf48b | 1642 | numpages -= cpa->numpages; |
9ae28475 | 1643 | if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) |
d75586ad SL |
1644 | cpa->curpage++; |
1645 | else | |
1646 | *cpa->vaddr += cpa->numpages * PAGE_SIZE; | |
1647 | ||
65e074df | 1648 | } |
ff31452b TG |
1649 | return 0; |
1650 | } | |
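/*
 * Editorial sketch, not part of this file: the numpages bookkeeping of
 * the loop above. The worker is handed the number of pages still wanted
 * and reports back how many it actually covered -- one page, or a whole
 * large page that could be preserved. Units are 4K pages; compiles
 * standalone in userspace.
 */
#include <stdio.h>

#define PTRS_PER_LARGE  512     /* one 2M page = 512 x 4K entries */

static unsigned long process_chunk(unsigned long pfn, unsigned long wanted)
{
        /* pretend an aligned, fully covered large page stays intact */
        if ((pfn % PTRS_PER_LARGE) == 0 && wanted >= PTRS_PER_LARGE)
                return PTRS_PER_LARGE;
        return 1;
}

int main(void)
{
        unsigned long pfn = 510, numpages = 1200, done, calls = 0;

        while (numpages) {
                done = process_chunk(pfn, numpages);
                numpages -= done;
                pfn += done;
                calls++;
        }
        /* 2 single pages, 2 large pages, 174 single pages = 178 calls */
        printf("%lu calls, final pfn %lu\n", calls, pfn);
        return 0;
}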
1651 | ||
c7486104 L |
1652 | /* |
1653 | * Machine check recovery code needs to change cache mode of poisoned | |
1654 | * pages to UC to avoid speculative access logging another error. But | |
1655 | * passing the address of the 1:1 mapping to set_memory_uc() is a fine | |
1656 | * way to encourage a speculative access. So we cheat and flip the top | |
1657 | * bit of the address. This works fine for the code that updates the | |
1658 | * page tables. But at the end of the process we need to flush the cache | |
1659 | * and the non-canonical address causes a #GP fault when used by the | |
1660 | * CLFLUSH instruction. | |
1661 | * | |
1662 | * But in the common case we already have a canonical address. This code | |
1663 | * will fix the top bit if needed and is a no-op otherwise. | |
1664 | */ | |
1665 | static inline unsigned long make_addr_canonical_again(unsigned long addr) | |
1666 | { | |
1667 | #ifdef CONFIG_X86_64 | |
1668 | return (long)(addr << 1) >> 1; | |
1669 | #else | |
1670 | return addr; | |
1671 | #endif | |
1672 | } | |
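/*
 * Editorial sketch, not part of this file: a worked example of the
 * shift trick above. Compiles standalone; it relies on the arithmetic
 * right shift of negative values that gcc and clang guarantee, just as
 * the kernel build does.
 */
#include <stdio.h>

static unsigned long make_addr_canonical_again(unsigned long addr)
{
        /* shift bit 63 out, then arithmetic-shift bit 62 back into it */
        return (long)(addr << 1) >> 1;
}

int main(void)
{
        unsigned long canon = 0xffff888123456000UL;     /* a canonical address */
        unsigned long decoy = canon & ~(1UL << 63);     /* top bit flipped off */

        printf("%#lx\n", make_addr_canonical_again(decoy));     /* 0xffff888123456000 */
        printf("%#lx\n", make_addr_canonical_again(canon));     /* unchanged: no-op */
        return 0;
}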
1673 | ||
1674 | ||
d75586ad | 1675 | static int change_page_attr_set_clr(unsigned long *addr, int numpages, |
c9caa02c | 1676 | pgprot_t mask_set, pgprot_t mask_clr, |
9ae28475 | 1677 | int force_split, int in_flag, |
1678 | struct page **pages) | |
ff31452b | 1679 | { |
72e458df | 1680 | struct cpa_data cpa; |
cacf8906 | 1681 | int ret, cache, checkalias; |
fa526d0d | 1682 | unsigned long baddr = 0; |
331e4065 | 1683 | |
82f0712c BP |
1684 | memset(&cpa, 0, sizeof(cpa)); |
1685 | ||
331e4065 | 1686 | /* |
39114b7a DH |
1687 | * Check whether we are requested to set an unsupported | |
1688 | * feature. Clearing unsupported features is OK. | |
331e4065 TG |
1689 | */ |
1690 | mask_set = canon_pgprot(mask_set); | |
39114b7a | 1691 | |
c9caa02c | 1692 | if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split) |
331e4065 TG |
1693 | return 0; |
1694 | ||
69b1415e | 1695 | /* Ensure we are PAGE_SIZE aligned */ |
9ae28475 | 1696 | if (in_flag & CPA_ARRAY) { |
d75586ad SL |
1697 | int i; |
1698 | for (i = 0; i < numpages; i++) { | |
1699 | if (addr[i] & ~PAGE_MASK) { | |
1700 | addr[i] &= PAGE_MASK; | |
1701 | WARN_ON_ONCE(1); | |
1702 | } | |
1703 | } | |
9ae28475 | 1704 | } else if (!(in_flag & CPA_PAGES_ARRAY)) { |
1705 | /* | |
1706 | * An in_flag of CPA_PAGES_ARRAY implies the pages are aligned. | |
1707 | * No need to check in that case. | |
1708 | */ | |
1709 | if (*addr & ~PAGE_MASK) { | |
1710 | *addr &= PAGE_MASK; | |
1711 | /* | |
1712 | * People should not be passing in unaligned addresses: | |
1713 | */ | |
1714 | WARN_ON_ONCE(1); | |
1715 | } | |
fa526d0d JS |
1716 | /* |
1717 | * Save address for cache flush. *addr is modified in the call | |
1718 | * to __change_page_attr_set_clr() below. | |
1719 | */ | |
c7486104 | 1720 | baddr = make_addr_canonical_again(*addr); |
69b1415e TG |
1721 | } |
1722 | ||
5843d9a4 NP |
1723 | /* Must avoid aliasing mappings in the highmem code */ |
1724 | kmap_flush_unused(); | |
1725 | ||
db64fe02 NP |
1726 | vm_unmap_aliases(); |
1727 | ||
72e458df | 1728 | cpa.vaddr = addr; |
9ae28475 | 1729 | cpa.pages = pages; |
72e458df TG |
1730 | cpa.numpages = numpages; |
1731 | cpa.mask_set = mask_set; | |
1732 | cpa.mask_clr = mask_clr; | |
d75586ad SL |
1733 | cpa.flags = 0; |
1734 | cpa.curpage = 0; | |
c9caa02c | 1735 | cpa.force_split = force_split; |
72e458df | 1736 | |
9ae28475 | 1737 | if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY)) |
1738 | cpa.flags |= in_flag; | |
d75586ad | 1739 | |
af96e443 TG |
1740 | /* No alias checking for _NX bit modifications */ |
1741 | checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX; | |
c40a56a7 DH |
1742 | /* Has caller explicitly disabled alias checking? */ |
1743 | if (in_flag & CPA_NO_CHECK_ALIAS) | |
1744 | checkalias = 0; | |
af96e443 TG |
1745 | |
1746 | ret = __change_page_attr_set_clr(&cpa, checkalias); | |
ff31452b | 1747 | |
f4ae5da0 TG |
1748 | /* |
1749 | * Check whether we really changed something: | |
1750 | */ | |
d75586ad | 1751 | if (!(cpa.flags & CPA_FLUSHTLB)) |
1ac2f7d5 | 1752 | goto out; |
cacf8906 | 1753 | |
6bb8383b AK |
1754 | /* |
1755 | * No need to flush when we did not set any of the caching | |
1756 | * attributes: | |
1757 | */ | |
c06814d8 | 1758 | cache = !!pgprot2cachemode(mask_set); |
6bb8383b | 1759 | |
57a6a46a | 1760 | /* |
fce2ce95 | 1761 | * On error, flush everything to be sure. | |
57a6a46a | 1762 | */ |
fce2ce95 | 1763 | if (ret) { |
6bb8383b | 1764 | cpa_flush_all(cache); |
fce2ce95 PZ |
1765 | goto out; |
1766 | } | |
1767 | ||
1768 | if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) { | |
1769 | cpa_flush_array(baddr, addr, numpages, cache, | |
1770 | cpa.flags, pages); | |
1771 | } else { | |
1772 | cpa_flush_range(baddr, numpages, cache); | |
1773 | } | |
cacf8906 | 1774 | |
76ebd054 | 1775 | out: |
ff31452b TG |
1776 | return ret; |
1777 | } | |
1778 | ||
d75586ad SL |
1779 | static inline int change_page_attr_set(unsigned long *addr, int numpages, |
1780 | pgprot_t mask, int array) | |
75cbade8 | 1781 | { |
d75586ad | 1782 | return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0, |
9ae28475 | 1783 | (array ? CPA_ARRAY : 0), NULL); |
75cbade8 AV |
1784 | } |
1785 | ||
d75586ad SL |
1786 | static inline int change_page_attr_clear(unsigned long *addr, int numpages, |
1787 | pgprot_t mask, int array) | |
72932c7a | 1788 | { |
d75586ad | 1789 | return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0, |
9ae28475 | 1790 | (array ? CPA_ARRAY : 0), NULL); |
72932c7a TG |
1791 | } |
1792 | ||
0f350755 | 1793 | static inline int cpa_set_pages_array(struct page **pages, int numpages, |
1794 | pgprot_t mask) | |
1795 | { | |
1796 | return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0, | |
1797 | CPA_PAGES_ARRAY, pages); | |
1798 | } | |
1799 | ||
1800 | static inline int cpa_clear_pages_array(struct page **pages, int numpages, | |
1801 | pgprot_t mask) | |
1802 | { | |
1803 | return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0, | |
1804 | CPA_PAGES_ARRAY, pages); | |
1805 | } | |
1806 | ||
1219333d | 1807 | int _set_memory_uc(unsigned long addr, int numpages) |
72932c7a | 1808 | { |
de33c442 SS |
1809 | /* |
1810 | * For now UC-. See comments in ioremap_nocache(). | |
e4b6be33 LR |
1811 | * If you really need strong UC use ioremap_uc(), but note |
1812 | * that you cannot override IO areas with set_memory_*() as | |
1813 | * these helpers cannot work with IO memory. | |
de33c442 | 1814 | */ |
d75586ad | 1815 | return change_page_attr_set(&addr, numpages, |
c06814d8 JG |
1816 | cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS), |
1817 | 0); | |
75cbade8 | 1818 | } |
1219333d | 1819 | |
1820 | int set_memory_uc(unsigned long addr, int numpages) | |
1821 | { | |
9fa3ab39 | 1822 | int ret; |
1823 | ||
de33c442 SS |
1824 | /* |
1825 | * For now UC-. See comments in ioremap_nocache(). | |
1826 | */ | |
9fa3ab39 | 1827 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, |
e00c8cc9 | 1828 | _PAGE_CACHE_MODE_UC_MINUS, NULL); |
9fa3ab39 | 1829 | if (ret) |
1830 | goto out_err; | |
1831 | ||
1832 | ret = _set_memory_uc(addr, numpages); | |
1833 | if (ret) | |
1834 | goto out_free; | |
1835 | ||
1836 | return 0; | |
1219333d | 1837 | |
9fa3ab39 | 1838 | out_free: |
1839 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); | |
1840 | out_err: | |
1841 | return ret; | |
1219333d | 1842 | } |
75cbade8 AV |
1843 | EXPORT_SYMBOL(set_memory_uc); |
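/*
 * Editorial sketch, not part of this file and untested: the pairing a
 * driver gets from set_memory_uc()/set_memory_wb() -- reserve_memtype()
 * on the way in, free_memtype() on the way out. The function and
 * variable names are hypothetical; error unwinding is elided.
 */
#include <linux/gfp.h>
#include <asm/set_memory.h>

static unsigned long my_buf;

static int my_buf_init(void)
{
        my_buf = __get_free_pages(GFP_KERNEL, 2);       /* 4 contiguous pages */
        if (!my_buf)
                return -ENOMEM;

        /* reserves the memtype and switches the PTEs to UC- in one call */
        return set_memory_uc(my_buf, 4);
}

static void my_buf_exit(void)
{
        set_memory_wb(my_buf, 4);       /* restores WB and frees the memtype */
        free_pages(my_buf, 2);
}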
1844 | ||
2d070eff | 1845 | static int _set_memory_array(unsigned long *addr, int addrinarray, |
c06814d8 | 1846 | enum page_cache_mode new_type) |
d75586ad | 1847 | { |
623dffb2 | 1848 | enum page_cache_mode set_type; |
9fa3ab39 | 1849 | int i, j; |
1850 | int ret; | |
1851 | ||
d75586ad | 1852 | for (i = 0; i < addrinarray; i++) { |
9fa3ab39 | 1853 | ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE, |
4f646254 | 1854 | new_type, NULL); |
9fa3ab39 | 1855 | if (ret) |
1856 | goto out_free; | |
d75586ad SL |
1857 | } |
1858 | ||
623dffb2 TK |
1859 | /* If WC, set to UC- first and then WC */ |
1860 | set_type = (new_type == _PAGE_CACHE_MODE_WC) ? | |
1861 | _PAGE_CACHE_MODE_UC_MINUS : new_type; | |
1862 | ||
9fa3ab39 | 1863 | ret = change_page_attr_set(addr, addrinarray, |
623dffb2 | 1864 | cachemode2pgprot(set_type), 1); |
4f646254 | 1865 | |
c06814d8 | 1866 | if (!ret && new_type == _PAGE_CACHE_MODE_WC) |
4f646254 | 1867 | ret = change_page_attr_set_clr(addr, addrinarray, |
c06814d8 JG |
1868 | cachemode2pgprot( |
1869 | _PAGE_CACHE_MODE_WC), | |
4f646254 PN |
1870 | __pgprot(_PAGE_CACHE_MASK), |
1871 | 0, CPA_ARRAY, NULL); | |
9fa3ab39 | 1872 | if (ret) |
1873 | goto out_free; | |
1874 | ||
1875 | return 0; | |
1876 | ||
1877 | out_free: | |
1878 | for (j = 0; j < i; j++) | |
1879 | free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE); | |
1880 | ||
1881 | return ret; | |
d75586ad | 1882 | } |
4f646254 PN |
1883 | |
1884 | int set_memory_array_uc(unsigned long *addr, int addrinarray) | |
1885 | { | |
c06814d8 | 1886 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_UC_MINUS); |
4f646254 | 1887 | } |
d75586ad SL |
1888 | EXPORT_SYMBOL(set_memory_array_uc); |
1889 | ||
4f646254 PN |
1890 | int set_memory_array_wc(unsigned long *addr, int addrinarray) |
1891 | { | |
c06814d8 | 1892 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WC); |
4f646254 PN |
1893 | } |
1894 | EXPORT_SYMBOL(set_memory_array_wc); | |
1895 | ||
623dffb2 TK |
1896 | int set_memory_array_wt(unsigned long *addr, int addrinarray) |
1897 | { | |
1898 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WT); | |
1899 | } | |
1900 | EXPORT_SYMBOL_GPL(set_memory_array_wt); | |
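/*
 * Editorial sketch, not part of this file and untested: the array
 * variants batch many scattered pages into one CPA call so the
 * expensive cache/TLB flushing happens once for the whole set rather
 * than per page. Names are hypothetical; error unwinding is elided.
 */
#include <linux/gfp.h>
#include <asm/set_memory.h>

#define MY_NBUFS 16

static unsigned long my_addrs[MY_NBUFS];

static int my_bufs_make_wc(void)
{
        int i;

        for (i = 0; i < MY_NBUFS; i++) {
                my_addrs[i] = __get_free_page(GFP_KERNEL);
                if (!my_addrs[i])
                        return -ENOMEM; /* leaks earlier pages: sketch only */
        }

        /* one flush for all 16 pages instead of 16 set_memory_wc() calls */
        return set_memory_array_wc(my_addrs, MY_NBUFS);
}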
1901 | ||
ef354af4 | 1902 | int _set_memory_wc(unsigned long addr, int numpages) |
1903 | { | |
3869c4aa | 1904 | int ret; |
bdc6340f PV |
1905 | unsigned long addr_copy = addr; |
1906 | ||
3869c4aa | 1907 | ret = change_page_attr_set(&addr, numpages, |
c06814d8 JG |
1908 | cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS), |
1909 | 0); | |
3869c4aa | 1910 | if (!ret) { |
bdc6340f | 1911 | ret = change_page_attr_set_clr(&addr_copy, numpages, |
c06814d8 JG |
1912 | cachemode2pgprot( |
1913 | _PAGE_CACHE_MODE_WC), | |
bdc6340f PV |
1914 | __pgprot(_PAGE_CACHE_MASK), |
1915 | 0, 0, NULL); | |
3869c4aa | 1916 | } |
1917 | return ret; | |
ef354af4 | 1918 | } |
1919 | ||
1920 | int set_memory_wc(unsigned long addr, int numpages) | |
1921 | { | |
9fa3ab39 | 1922 | int ret; |
1923 | ||
9fa3ab39 | 1924 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, |
e00c8cc9 | 1925 | _PAGE_CACHE_MODE_WC, NULL); |
9fa3ab39 | 1926 | if (ret) |
623dffb2 | 1927 | return ret; |
ef354af4 | 1928 | |
9fa3ab39 | 1929 | ret = _set_memory_wc(addr, numpages); |
1930 | if (ret) | |
623dffb2 | 1931 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); |
9fa3ab39 | 1932 | |
9fa3ab39 | 1933 | return ret; |
ef354af4 | 1934 | } |
1935 | EXPORT_SYMBOL(set_memory_wc); | |
1936 | ||
623dffb2 TK |
1937 | int _set_memory_wt(unsigned long addr, int numpages) |
1938 | { | |
1939 | return change_page_attr_set(&addr, numpages, | |
1940 | cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0); | |
1941 | } | |
1942 | ||
1943 | int set_memory_wt(unsigned long addr, int numpages) | |
1944 | { | |
1945 | int ret; | |
1946 | ||
1947 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, | |
1948 | _PAGE_CACHE_MODE_WT, NULL); | |
1949 | if (ret) | |
1950 | return ret; | |
1951 | ||
1952 | ret = _set_memory_wt(addr, numpages); | |
1953 | if (ret) | |
1954 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); | |
1955 | ||
1956 | return ret; | |
1957 | } | |
1958 | EXPORT_SYMBOL_GPL(set_memory_wt); | |
1959 | ||
1219333d | 1960 | int _set_memory_wb(unsigned long addr, int numpages) |
75cbade8 | 1961 | { |
c06814d8 | 1962 | /* WB cache mode is hard wired to all cache attribute bits being 0 */ |
d75586ad SL |
1963 | return change_page_attr_clear(&addr, numpages, |
1964 | __pgprot(_PAGE_CACHE_MASK), 0); | |
75cbade8 | 1965 | } |
1219333d | 1966 | |
1967 | int set_memory_wb(unsigned long addr, int numpages) | |
1968 | { | |
9fa3ab39 | 1969 | int ret; |
1970 | ||
1971 | ret = _set_memory_wb(addr, numpages); | |
1972 | if (ret) | |
1973 | return ret; | |
1974 | ||
c15238df | 1975 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); |
9fa3ab39 | 1976 | return 0; |
1219333d | 1977 | } |
75cbade8 AV |
1978 | EXPORT_SYMBOL(set_memory_wb); |
1979 | ||
d75586ad SL |
1980 | int set_memory_array_wb(unsigned long *addr, int addrinarray) |
1981 | { | |
1982 | int i; | |
a5593e0b | 1983 | int ret; |
1984 | ||
c06814d8 | 1985 | /* WB cache mode is hard wired to all cache attribute bits being 0 */ |
a5593e0b | 1986 | ret = change_page_attr_clear(addr, addrinarray, |
1987 | __pgprot(_PAGE_CACHE_MASK), 1); | |
9fa3ab39 | 1988 | if (ret) |
1989 | return ret; | |
d75586ad | 1990 | |
9fa3ab39 | 1991 | for (i = 0; i < addrinarray; i++) |
1992 | free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE); | |
c5e147cf | 1993 | |
9fa3ab39 | 1994 | return 0; |
d75586ad SL |
1995 | } |
1996 | EXPORT_SYMBOL(set_memory_array_wb); | |
1997 | ||
75cbade8 AV |
1998 | int set_memory_x(unsigned long addr, int numpages) |
1999 | { | |
583140af PA |
2000 | if (!(__supported_pte_mask & _PAGE_NX)) |
2001 | return 0; | |
2002 | ||
d75586ad | 2003 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0); |
75cbade8 AV |
2004 | } |
2005 | EXPORT_SYMBOL(set_memory_x); | |
2006 | ||
2007 | int set_memory_nx(unsigned long addr, int numpages) | |
2008 | { | |
583140af PA |
2009 | if (!(__supported_pte_mask & _PAGE_NX)) |
2010 | return 0; | |
2011 | ||
d75586ad | 2012 | return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0); |
75cbade8 AV |
2013 | } |
2014 | EXPORT_SYMBOL(set_memory_nx); | |
2015 | ||
2016 | int set_memory_ro(unsigned long addr, int numpages) | |
2017 | { | |
d75586ad | 2018 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0); |
75cbade8 | 2019 | } |
75cbade8 AV |
2020 | |
2021 | int set_memory_rw(unsigned long addr, int numpages) | |
2022 | { | |
d75586ad | 2023 | return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0); |
75cbade8 | 2024 | } |
f62d0f00 IM |
2025 | |
2026 | int set_memory_np(unsigned long addr, int numpages) | |
2027 | { | |
d75586ad | 2028 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0); |
f62d0f00 | 2029 | } |
75cbade8 | 2030 | |
c40a56a7 DH |
2031 | int set_memory_np_noalias(unsigned long addr, int numpages) |
2032 | { | |
2033 | int cpa_flags = CPA_NO_CHECK_ALIAS; | |
2034 | ||
2035 | return change_page_attr_set_clr(&addr, numpages, __pgprot(0), | |
2036 | __pgprot(_PAGE_PRESENT), 0, | |
2037 | cpa_flags, NULL); | |
2038 | } | |
2039 | ||
c9caa02c AK |
2040 | int set_memory_4k(unsigned long addr, int numpages) |
2041 | { | |
d75586ad | 2042 | return change_page_attr_set_clr(&addr, numpages, __pgprot(0), |
9ae28475 | 2043 | __pgprot(0), 1, 0, NULL); |
c9caa02c AK |
2044 | } |
2045 | ||
39114b7a DH |
2046 | int set_memory_nonglobal(unsigned long addr, int numpages) |
2047 | { | |
2048 | return change_page_attr_clear(&addr, numpages, | |
2049 | __pgprot(_PAGE_GLOBAL), 0); | |
2050 | } | |
2051 | ||
eac7073a DH |
2052 | int set_memory_global(unsigned long addr, int numpages) |
2053 | { | |
2054 | return change_page_attr_set(&addr, numpages, | |
2055 | __pgprot(_PAGE_GLOBAL), 0); | |
2056 | } | |
2057 | ||
77bd2342 TL |
2058 | static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) |
2059 | { | |
2060 | struct cpa_data cpa; | |
2061 | unsigned long start; | |
2062 | int ret; | |
2063 | ||
a72ec5a3 TL |
2064 | /* Nothing to do if memory encryption is not active */ |
2065 | if (!mem_encrypt_active()) | |
77bd2342 TL |
2066 | return 0; |
2067 | ||
2068 | /* Should not be working on unaligned addresses */ | |
2069 | if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr)) | |
2070 | addr &= PAGE_MASK; | |
2071 | ||
2072 | start = addr; | |
2073 | ||
2074 | memset(&cpa, 0, sizeof(cpa)); | |
2075 | cpa.vaddr = &addr; | |
2076 | cpa.numpages = numpages; | |
2077 | cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0); | |
2078 | cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC); | |
2079 | cpa.pgd = init_mm.pgd; | |
2080 | ||
2081 | /* Must avoid aliasing mappings in the highmem code */ | |
2082 | kmap_flush_unused(); | |
2083 | vm_unmap_aliases(); | |
2084 | ||
2085 | /* | |
2086 | * Before changing the encryption attribute, we need to flush caches. | |
2087 | */ | |
5f464b33 | 2088 | cpa_flush_range(start, numpages, 1); |
77bd2342 TL |
2089 | |
2090 | ret = __change_page_attr_set_clr(&cpa, 1); | |
2091 | ||
2092 | /* | |
2093 | * After changing the encryption attribute, we need to flush TLBs | |
2094 | * again in case any speculative TLB caching occurred (but no need | |
2095 | * to flush caches again). We could just use cpa_flush_all(), but | |
2096 | * in case TLB flushing gets optimized in the cpa_flush_range() | |
2097 | * path, use the same logic as above. | |
2098 | */ | |
5f464b33 | 2099 | cpa_flush_range(start, numpages, 0); |
77bd2342 TL |
2100 | |
2101 | return ret; | |
2102 | } | |
2103 | ||
2104 | int set_memory_encrypted(unsigned long addr, int numpages) | |
2105 | { | |
2106 | return __set_memory_enc_dec(addr, numpages, true); | |
2107 | } | |
95cf9264 | 2108 | EXPORT_SYMBOL_GPL(set_memory_encrypted); |
77bd2342 TL |
2109 | |
2110 | int set_memory_decrypted(unsigned long addr, int numpages) | |
2111 | { | |
2112 | return __set_memory_enc_dec(addr, numpages, false); | |
2113 | } | |
95cf9264 | 2114 | EXPORT_SYMBOL_GPL(set_memory_decrypted); |
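/*
 * Editorial sketch, not part of this file and untested: under SEV a
 * guest shares a buffer with the unencrypted host by clearing the
 * C-bit on its pages, and takes it private again when done. The
 * helper names are hypothetical.
 */
#include <linux/gfp.h>
#include <asm/set_memory.h>

static int my_share_with_host(unsigned long buf, int numpages)
{
        /* clears _PAGE_ENC and performs the required cache/TLB flushes */
        return set_memory_decrypted(buf, numpages);
}

static int my_unshare_from_host(unsigned long buf, int numpages)
{
        /* sets _PAGE_ENC again so the guest owns the contents privately */
        return set_memory_encrypted(buf, numpages);
}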
77bd2342 | 2115 | |
75cbade8 AV |
2116 | int set_pages_uc(struct page *page, int numpages) |
2117 | { | |
2118 | unsigned long addr = (unsigned long)page_address(page); | |
75cbade8 | 2119 | |
d7c8f21a | 2120 | return set_memory_uc(addr, numpages); |
75cbade8 AV |
2121 | } |
2122 | EXPORT_SYMBOL(set_pages_uc); | |
2123 | ||
4f646254 | 2124 | static int _set_pages_array(struct page **pages, int addrinarray, |
c06814d8 | 2125 | enum page_cache_mode new_type) |
0f350755 | 2126 | { |
2127 | unsigned long start; | |
2128 | unsigned long end; | |
623dffb2 | 2129 | enum page_cache_mode set_type; |
0f350755 | 2130 | int i; |
2131 | int free_idx; | |
4f646254 | 2132 | int ret; |
0f350755 | 2133 | |
2134 | for (i = 0; i < addrinarray; i++) { | |
8523acfe TH |
2135 | if (PageHighMem(pages[i])) |
2136 | continue; | |
2137 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; | |
0f350755 | 2138 | end = start + PAGE_SIZE; |
4f646254 | 2139 | if (reserve_memtype(start, end, new_type, NULL)) |
0f350755 | 2140 | goto err_out; |
2141 | } | |
2142 | ||
623dffb2 TK |
2143 | /* If WC, set to UC- first and then WC */ |
2144 | set_type = (new_type == _PAGE_CACHE_MODE_WC) ? | |
2145 | _PAGE_CACHE_MODE_UC_MINUS : new_type; | |
2146 | ||
4f646254 | 2147 | ret = cpa_set_pages_array(pages, addrinarray, |
623dffb2 | 2148 | cachemode2pgprot(set_type)); |
c06814d8 | 2149 | if (!ret && new_type == _PAGE_CACHE_MODE_WC) |
4f646254 | 2150 | ret = change_page_attr_set_clr(NULL, addrinarray, |
c06814d8 JG |
2151 | cachemode2pgprot( |
2152 | _PAGE_CACHE_MODE_WC), | |
4f646254 PN |
2153 | __pgprot(_PAGE_CACHE_MASK), |
2154 | 0, CPA_PAGES_ARRAY, pages); | |
2155 | if (ret) | |
2156 | goto err_out; | |
2157 | return 0; /* Success */ | |
0f350755 | 2158 | err_out: |
2159 | free_idx = i; | |
2160 | for (i = 0; i < free_idx; i++) { | |
8523acfe TH |
2161 | if (PageHighMem(pages[i])) |
2162 | continue; | |
2163 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; | |
0f350755 | 2164 | end = start + PAGE_SIZE; |
2165 | free_memtype(start, end); | |
2166 | } | |
2167 | return -EINVAL; | |
2168 | } | |
4f646254 PN |
2169 | |
2170 | int set_pages_array_uc(struct page **pages, int addrinarray) | |
2171 | { | |
c06814d8 | 2172 | return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS); |
4f646254 | 2173 | } |
0f350755 | 2174 | EXPORT_SYMBOL(set_pages_array_uc); |
2175 | ||
4f646254 PN |
2176 | int set_pages_array_wc(struct page **pages, int addrinarray) |
2177 | { | |
c06814d8 | 2178 | return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC); |
4f646254 PN |
2179 | } |
2180 | EXPORT_SYMBOL(set_pages_array_wc); | |
2181 | ||
623dffb2 TK |
2182 | int set_pages_array_wt(struct page **pages, int addrinarray) |
2183 | { | |
2184 | return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WT); | |
2185 | } | |
2186 | EXPORT_SYMBOL_GPL(set_pages_array_wt); | |
2187 | ||
75cbade8 AV |
2188 | int set_pages_wb(struct page *page, int numpages) |
2189 | { | |
2190 | unsigned long addr = (unsigned long)page_address(page); | |
75cbade8 | 2191 | |
d7c8f21a | 2192 | return set_memory_wb(addr, numpages); |
75cbade8 AV |
2193 | } |
2194 | EXPORT_SYMBOL(set_pages_wb); | |
2195 | ||
0f350755 | 2196 | int set_pages_array_wb(struct page **pages, int addrinarray) |
2197 | { | |
2198 | int retval; | |
2199 | unsigned long start; | |
2200 | unsigned long end; | |
2201 | int i; | |
2202 | ||
c06814d8 | 2203 | /* WB cache mode is hard wired to all cache attribute bits being 0 */ |
0f350755 | 2204 | retval = cpa_clear_pages_array(pages, addrinarray, |
2205 | __pgprot(_PAGE_CACHE_MASK)); | |
9fa3ab39 | 2206 | if (retval) |
2207 | return retval; | |
0f350755 | 2208 | |
2209 | for (i = 0; i < addrinarray; i++) { | |
8523acfe TH |
2210 | if (PageHighMem(pages[i])) |
2211 | continue; | |
2212 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; | |
0f350755 | 2213 | end = start + PAGE_SIZE; |
2214 | free_memtype(start, end); | |
2215 | } | |
2216 | ||
9fa3ab39 | 2217 | return 0; |
0f350755 | 2218 | } |
2219 | EXPORT_SYMBOL(set_pages_array_wb); | |
2220 | ||
75cbade8 AV |
2221 | int set_pages_x(struct page *page, int numpages) |
2222 | { | |
2223 | unsigned long addr = (unsigned long)page_address(page); | |
75cbade8 | 2224 | |
d7c8f21a | 2225 | return set_memory_x(addr, numpages); |
75cbade8 AV |
2226 | } |
2227 | EXPORT_SYMBOL(set_pages_x); | |
2228 | ||
2229 | int set_pages_nx(struct page *page, int numpages) | |
2230 | { | |
2231 | unsigned long addr = (unsigned long)page_address(page); | |
75cbade8 | 2232 | |
d7c8f21a | 2233 | return set_memory_nx(addr, numpages); |
75cbade8 AV |
2234 | } |
2235 | EXPORT_SYMBOL(set_pages_nx); | |
2236 | ||
2237 | int set_pages_ro(struct page *page, int numpages) | |
2238 | { | |
2239 | unsigned long addr = (unsigned long)page_address(page); | |
75cbade8 | 2240 | |
d7c8f21a | 2241 | return set_memory_ro(addr, numpages); |
75cbade8 | 2242 | } |
75cbade8 AV |
2243 | |
2244 | int set_pages_rw(struct page *page, int numpages) | |
2245 | { | |
2246 | unsigned long addr = (unsigned long)page_address(page); | |
e81d5dc4 | 2247 | |
d7c8f21a | 2248 | return set_memory_rw(addr, numpages); |
78c94aba IM |
2249 | } |
2250 | ||
1da177e4 | 2251 | #ifdef CONFIG_DEBUG_PAGEALLOC |
f62d0f00 IM |
2252 | |
2253 | static int __set_pages_p(struct page *page, int numpages) | |
2254 | { | |
d75586ad SL |
2255 | unsigned long tempaddr = (unsigned long) page_address(page); |
2256 | struct cpa_data cpa = { .vaddr = &tempaddr, | |
82f0712c | 2257 | .pgd = NULL, |
72e458df TG |
2258 | .numpages = numpages, |
2259 | .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW), | |
d75586ad SL |
2260 | .mask_clr = __pgprot(0), |
2261 | .flags = 0}; | |
72932c7a | 2262 | |
55121b43 SS |
2263 | /* |
2264 | * No alias checking needed for setting the present flag. Otherwise, | |
2265 | * we may need to break large pages for 64-bit kernel text | |
2266 | * mappings (this adds complexity, especially if we want to do | |
2267 | * this from atomic context). Let's keep it simple! | |
2268 | */ | |
2269 | return __change_page_attr_set_clr(&cpa, 0); | |
f62d0f00 IM |
2270 | } |
2271 | ||
2272 | static int __set_pages_np(struct page *page, int numpages) | |
2273 | { | |
d75586ad SL |
2274 | unsigned long tempaddr = (unsigned long) page_address(page); |
2275 | struct cpa_data cpa = { .vaddr = &tempaddr, | |
82f0712c | 2276 | .pgd = NULL, |
72e458df TG |
2277 | .numpages = numpages, |
2278 | .mask_set = __pgprot(0), | |
d75586ad SL |
2279 | .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW), |
2280 | .flags = 0}; | |
72932c7a | 2281 | |
55121b43 SS |
2282 | /* |
2283 | * No alias checking needed for clearing the present flag. Otherwise, | |
2284 | * we may need to break large pages for 64-bit kernel text | |
2285 | * mappings (this adds complexity, especially if we want to do | |
2286 | * this from atomic context). Let's keep it simple! | |
2287 | */ | |
2288 | return __change_page_attr_set_clr(&cpa, 0); | |
f62d0f00 IM |
2289 | } |
2290 | ||
031bc574 | 2291 | void __kernel_map_pages(struct page *page, int numpages, int enable) |
1da177e4 LT |
2292 | { |
2293 | if (PageHighMem(page)) | |
2294 | return; | |
9f4c815c | 2295 | if (!enable) { |
f9b8404c IM |
2296 | debug_check_no_locks_freed(page_address(page), |
2297 | numpages * PAGE_SIZE); | |
9f4c815c | 2298 | } |
de5097c2 | 2299 | |
9f4c815c | 2300 | /* |
f8d8406b | 2301 | * The return value is ignored as the calls cannot fail. |
55121b43 SS |
2302 | * Large pages for identity mappings are not used at boot time, | |
2303 | * so no memory allocation is needed during a large page split. | |
1da177e4 | 2304 | */ |
f62d0f00 IM |
2305 | if (enable) |
2306 | __set_pages_p(page, numpages); | |
2307 | else | |
2308 | __set_pages_np(page, numpages); | |
9f4c815c IM |
2309 | |
2310 | /* | |
e4b71dcf IM |
2311 | * We should perform an IPI and flush all tlbs, |
2312 | * but that can deadlock->flush only current cpu: | |
1da177e4 LT |
2313 | */ |
2314 | __flush_tlb_all(); | |
26564600 BO |
2315 | |
2316 | arch_flush_lazy_mmu_mode(); | |
ee7ae7a1 TG |
2317 | } |
2318 | ||
8a235efa RW |
2319 | #ifdef CONFIG_HIBERNATION |
2320 | ||
2321 | bool kernel_page_present(struct page *page) | |
2322 | { | |
2323 | unsigned int level; | |
2324 | pte_t *pte; | |
2325 | ||
2326 | if (PageHighMem(page)) | |
2327 | return false; | |
2328 | ||
2329 | pte = lookup_address((unsigned long)page_address(page), &level); | |
2330 | return (pte_val(*pte) & _PAGE_PRESENT); | |
2331 | } | |
2332 | ||
2333 | #endif /* CONFIG_HIBERNATION */ | |
2334 | ||
2335 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | |
d1028a15 | 2336 | |
82f0712c BP |
2337 | int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, |
2338 | unsigned numpages, unsigned long page_flags) | |
2339 | { | |
2340 | int retval = -EINVAL; | |
2341 | ||
2342 | struct cpa_data cpa = { | |
2343 | .vaddr = &address, | |
2344 | .pfn = pfn, | |
2345 | .pgd = pgd, | |
2346 | .numpages = numpages, | |
2347 | .mask_set = __pgprot(0), | |
2348 | .mask_clr = __pgprot(0), | |
2349 | .flags = 0, | |
2350 | }; | |
2351 | ||
2352 | if (!(__supported_pte_mask & _PAGE_NX)) | |
2353 | goto out; | |
2354 | ||
2355 | if (!(page_flags & _PAGE_NX)) | |
2356 | cpa.mask_clr = __pgprot(_PAGE_NX); | |
2357 | ||
15f003d2 SP |
2358 | if (!(page_flags & _PAGE_RW)) |
2359 | cpa.mask_clr = __pgprot(_PAGE_RW); | |
2360 | ||
21729f81 TL |
2361 | if (!(page_flags & _PAGE_ENC)) |
2362 | cpa.mask_clr = pgprot_encrypted(cpa.mask_clr); | |
2363 | ||
82f0712c BP |
2364 | cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags); |
2365 | ||
2366 | retval = __change_page_attr_set_clr(&cpa, 0); | |
2367 | __flush_tlb_all(); | |
2368 | ||
2369 | out: | |
2370 | return retval; | |
2371 | } | |
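/*
 * Editorial sketch, not part of this file and untested: modeled on how
 * the EFI runtime code uses this helper -- map one physical page into a
 * private pgd. With page_flags == _PAGE_RW the mapping comes out
 * present and writable, and _PAGE_NX is cleared (see the checks above),
 * so the page is executable. The parameters are example values.
 */
static int my_map_one_page(pgd_t *pgd, u64 pfn, unsigned long va)
{
        return kernel_map_pages_in_pgd(pgd, pfn, va, 1, _PAGE_RW);
}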
2372 | ||
d1028a15 AV |
2373 | /* |
2374 | * The testcases use internal knowledge of the implementation that shouldn't | |
2375 | * be exposed to the rest of the kernel. Include these directly here. | |
2376 | */ | |
2377 | #ifdef CONFIG_CPA_DEBUG | |
2378 | #include "pageattr-test.c" | |
2379 | #endif |