/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include <asm/e820/api.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>
#include <asm/set_memory.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	*vaddr;
	pgd_t		*pgd;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	unsigned long	numpages;
	int		flags;
	unsigned long	pfn;
	unsigned	force_split : 1;
	int		curpage;
	struct page	**pages;
};

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC, which uses large identity
 * mappings) using cpa_lock, so that no CPU with stale large-page TLB
 * entries can change a page attribute in parallel with another CPU
 * splitting that large page entry while changing the attribute.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4
#define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	/* Protect against CPA */
	spin_lock(&pgd_lock);
	direct_pages_count[level] += pages;
	spin_unlock(&pgd_lock);
}

static void split_page_count(int level)
{
	if (direct_pages_count[level] == 0)
		return;

	direct_pages_count[level]--;
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k: %8lu kB\n",
			direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	seq_printf(m, "DirectMap2M: %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 11);
#else
	seq_printf(m, "DirectMap4M: %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 12);
#endif
	if (direct_gbpages)
		seq_printf(m, "DirectMap1G: %8lu kB\n",
			direct_pages_count[PG_LEVEL_1G] << 20);
}
#else
static inline void split_page_count(int level) { }
#endif

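/*
 * Illustrative note (not from the original file) on the shifts above:
 * the counters hold whole pages of the given size and the report is in
 * kB, so each shift is just the size conversion:
 *
 *	4K pages: n * 4 kB       -> n << 2
 *	2M pages: n * 2048 kB    -> n << 11
 *	4M pages: n * 4096 kB    -> n << 12
 *	1G pages: n * 1048576 kB -> n << 20
 */
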
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static inline int
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr <= end;
}

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
	return __pa_symbol(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	/* Do not reference physical address outside the kernel. */
	return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
}

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/*
	 * Kernel text has an alias mapping at a high address, known
	 * here as "highmap".
	 */
	return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
}

#else

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/* There is no highmap on 32-bit */
	return false;
}

#endif

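/*
 * Illustrative note (not part of the original file): highmap_end_pfn()
 * returns the pfn of the *last* byte of the rounded-up kernel image, so
 * the highmap check must be inclusive at both ends. E.g. for a made-up
 * kernel spanning pfns 0x1000..0x25ff, the final pfn is in the highmap,
 * which the half-open within() would wrongly exclude:
 *
 *	within(0x25ff, 0x1000, 0x25ff)           == 0
 *	within_inclusive(0x25ff, 0x1000, 0x25ff) == 1
 */
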
/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflushopt is an unordered instruction which needs fencing with mfence or
 * sfence to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
	void *vend = vaddr + size;

	if (p >= vend)
		return;

	mb();

	for (; p < vend; p += clflush_size)
		clflushopt(p);

	mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);

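/*
 * Usage sketch (illustrative, not from the original file): a caller that
 * has written a buffer through a cached mapping and needs the data to
 * reach memory before a device reads it could do (buf and len are made
 * up for the example):
 *
 *	memcpy(buf, data, len);
 *	clflush_cache_range(buf, len);
 *
 * Note the alignment handling above: the start pointer is rounded down
 * to a cacheline boundary, so partial first/last lines are still flushed.
 */
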
void arch_invalidate_pmem(void *addr, size_t size)
{
	clflush_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);

static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86 >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1);

	if (!cache)
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}

static void cpa_flush_array(unsigned long *start, int numpages, int cache,
			    int in_flags, struct page **pages)
{
	unsigned int i, level;
#ifdef CONFIG_PREEMPT
	/*
	 * Avoid wbinvd() because it causes latencies on all CPUs,
	 * regardless of any CPU isolation that may be in effect.
	 *
	 * This should be extended for CAT enabled systems independent of
	 * PREEMPT because wbinvd() does not respect the CAT partitions and
	 * this is exposed to unprivileged users through the graphics
	 * subsystem.
	 */
	unsigned long do_wbinvd = 0;
#else
	unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
#endif

	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);

	if (!cache || do_wbinvd)
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0; i < numpages; i++) {
		unsigned long addr;
		pte_t *pte;

		if (in_flags & CPA_PAGES_ARRAY)
			addr = (unsigned long)page_address(pages[i]);
		else
			addr = start[i];

		pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *)addr, PAGE_SIZE);
	}
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
					  unsigned long pfn)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
#ifdef CONFIG_PCI_BIOS
	if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_NX;
#endif

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext since that is gone later on. On
	 * 64-bit we do not enforce !NX on the low mapping.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The .rodata section needs to be read-only. Using the pfn
	 * catches all aliases. This also includes __ro_after_init,
	 * so do not enforce until kernel_set_to_readonly is true.
	 */
	if (kernel_set_to_readonly &&
	    within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
		   __pa_symbol(__end_rodata) >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_RW;

#if defined(CONFIG_X86_64)
	/*
	 * Once the kernel maps the text as RO (kernel_set_to_readonly is
	 * set), the kernel text mappings for the large-page-aligned text
	 * and rodata sections will always be read-only. The kernel identity
	 * mappings covering the holes caused by this alignment can be
	 * anything the user asks for.
	 *
	 * This will preserve the large page mappings for kernel text/data
	 * at no extra cost.
	 */
	if (kernel_set_to_readonly &&
	    within(address, (unsigned long)_text,
		   (unsigned long)__end_rodata_hpage_align)) {
		unsigned int level;

		/*
		 * Don't enforce the !RW mapping for the kernel text mapping,
		 * if the current mapping is already using small page mapping.
		 * No need to work hard to preserve large page mappings in this
		 * case.
		 *
		 * This also fixes the Linux Xen paravirt guest boot failure
		 * caused by unexpected read-only mappings for kernel identity
		 * mappings. In this paravirt guest case, the kernel text
		 * mapping and the kernel identity mapping share the same
		 * page-table pages, so we can't really use different
		 * protections for the kernel text and identity mappings.
		 * Also, these shared mappings are made of small page
		 * mappings. Thus not enforcing !RW for small page kernel
		 * text mappings helps the Linux Xen paravirt guest boot
		 * as well.
		 */
		if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
			pgprot_val(forbidden) |= _PAGE_RW;
	}
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}

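/*
 * Worked example (illustrative): a caller requesting RW on an address
 * whose pfn aliases .rodata has the request filtered here, because the
 * rodata check puts _PAGE_RW into 'forbidden':
 *
 *	pgprot_val(prot) |= _PAGE_RW;                    // caller's request
 *	prot = static_protections(prot, addr, rodata_pfn);
 *	// (pgprot_val(prot) & _PAGE_RW) == 0 once kernel_set_to_readonly
 *
 * so the page stays read-only regardless of what was asked for.
 */
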
/*
 * Lookup the page table entry for a virtual address in a specific pgd.
 * Return a pointer to the entry and the level of the mapping.
 */
pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
			     unsigned int *level)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return NULL;

	*level = PG_LEVEL_512G;
	if (p4d_large(*p4d) || !p4d_present(*p4d))
		return (pte_t *)p4d;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a non-existing mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	return lookup_address_in_pgd(pgd_offset_k(address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address);

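/*
 * Usage sketch (illustrative): walk the kernel page tables for an
 * address and report how it is mapped. 'addr' is made up for the
 * example:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && (pte_val(*pte) & _PAGE_PRESENT))
 *		pr_info("mapped, level %d\n", level);
 *
 * Beware the note above: for a non-present large entry the returned
 * pointer is really a pud/pmd, so always check the level before
 * interpreting it as a pte.
 */
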
static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
				  unsigned int *level)
{
	if (cpa->pgd)
		return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
					     address, level);

	return lookup_address(address, level);
}

/*
 * Lookup the PMD entry for a virtual address. Return a pointer to the entry
 * or NULL if not present.
 */
pmd_t *lookup_pmd_address(unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
		return NULL;

	return pmd_offset(pud, address);
}

/*
 * This is necessary because __pa() does not work on some
 * kinds of memory, like vmalloc() or the alloc_remap()
 * areas on 32-bit NUMA systems. The percpu areas can
 * end up in this kind of memory, for instance.
 *
 * This could be optimized, but it is only intended to be
 * used at initialization time, and keeping it
 * unoptimized should increase the testing coverage for
 * the more obscure platforms.
 */
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
	unsigned long virt_addr = (unsigned long)__virt_addr;
	phys_addr_t phys_addr;
	unsigned long offset;
	enum pg_level level;
	pte_t *pte;

	pte = lookup_address(virt_addr, &level);
	BUG_ON(!pte);

	/*
	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
	 * make 32-PAE kernel work correctly.
	 */
	switch (level) {
	case PG_LEVEL_1G:
		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PUD_PAGE_MASK;
		break;
	case PG_LEVEL_2M:
		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PMD_PAGE_MASK;
		break;
	default:
		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
		offset = virt_addr & ~PAGE_MASK;
	}

	return (phys_addr_t)(phys_addr | offset);
}
EXPORT_SYMBOL_GPL(slow_virt_to_phys);

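/*
 * Usage sketch (illustrative): translating a percpu pointer, where plain
 * __pa() arithmetic can be wrong on 32-bit NUMA. The percpu variable is
 * made up for the example:
 *
 *	phys_addr_t pa = slow_virt_to_phys(this_cpu_ptr(&example_var));
 *
 * The switch above also shows the offset math: for a 2M mapping the low
 * 21 bits of the virtual address (virt_addr & ~PMD_PAGE_MASK) are ORed
 * back onto the page frame's physical base.
 */
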
/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			p4d = p4d_offset(pgd, address);
			pud = pud_offset(p4d, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot)
{
	/*
	 * _PAGE_GLOBAL means "global page" for present PTEs.
	 * But, it is also used to indicate _PAGE_PROTNONE
	 * for non-present PTEs.
	 *
	 * This ensures that a _PAGE_GLOBAL PTE going from
	 * present to non-present is not confused as
	 * _PAGE_PROTNONE.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		pgprot_val(prot) &= ~_PAGE_GLOBAL;

	return prot;
}

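/*
 * Background note (illustrative): _PAGE_PROTNONE shares a bit position
 * with _PAGE_GLOBAL (_PAGE_BIT_PROTNONE == _PAGE_BIT_GLOBAL), which is
 * why the helper above must drop _PAGE_GLOBAL whenever _PAGE_PRESENT is
 * clear. A sketch of the hazard when making a present+global PTE
 * non-present without it:
 *
 *	pgprot_val(prot) == _PAGE_GLOBAL    // present bit cleared
 *	pte_protnone(pte)                   // would now report true
 */
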
static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
			struct cpa_data *cpa)
{
	unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn, old_pfn;
	pte_t new_pte, old_pte, *tmp;
	pgprot_t old_prot, new_prot, req_prot;
	int i, do_split = 1;
	enum pg_level level;

	if (cpa->force_split)
		return 1;

	spin_lock(&pgd_lock);
	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = _lookup_address_cpa(cpa, address, &level);
	if (tmp != kpte)
		goto out_unlock;

	switch (level) {
	case PG_LEVEL_2M:
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		old_pfn = pmd_pfn(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		old_prot = pud_pgprot(*(pud_t *)kpte);
		old_pfn = pud_pfn(*(pud_t *)kpte);
		break;
	default:
		do_split = -EINVAL;
		goto out_unlock;
	}

	psize = page_level_size(level);
	pmask = page_level_mask(level);

	/*
	 * Calculate the number of pages, which fit into this large
	 * page starting at address:
	 */
	nextpage_addr = (address + psize) & pmask;
	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 * Convert protection attributes to 4k-format, as cpa->mask* are set
	 * up accordingly.
	 */
	old_pte = *kpte;
	/* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */
	req_prot = pgprot_large_2_4k(old_prot);

	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * req_prot is in format of 4k pages. It must be converted to large
	 * page format: the caching mode includes the PAT bit located at
	 * different bit positions in the two formats.
	 */
	req_prot = pgprot_4k_2_large(req_prot);
	req_prot = pgprot_clear_protnone_bits(req_prot);
	if (pgprot_val(req_prot) & _PAGE_PRESENT)
		pgprot_val(req_prot) |= _PAGE_PSE;

	/*
	 * old_pfn points to the large page base pfn. So we need
	 * to add the offset of the virtual address:
	 */
	pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
	cpa->pfn = pfn;

	new_prot = static_protections(req_prot, address, pfn);

	/*
	 * We need to check the full range, whether
	 * static_protections() requires a different pgprot for one of
	 * the pages in the range we try to preserve:
	 */
	addr = address & pmask;
	pfn = old_pfn;
	for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
		pgprot_t chk_prot = static_protections(req_prot, addr, pfn);

		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
			goto out_unlock;
	}

	/*
	 * If there are no changes, return. cpa->numpages has been updated
	 * above:
	 */
	if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
		do_split = 0;
		goto out_unlock;
	}

	/*
	 * We need to change the attributes. Check, whether we can
	 * change the large page in one go. We request a split, when
	 * the address is not aligned or the number of pages is
	 * smaller than the number of pages in the large page. Note
	 * that we limited the number of possible pages already to
	 * the number of pages in the large page.
	 */
	if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) {
		/*
		 * The address is aligned and the number of pages
		 * covers the full page.
		 */
		new_pte = pfn_pte(old_pfn, new_prot);
		__set_pmd_pte(kpte, address, new_pte);
		cpa->flags |= CPA_FLUSHTLB;
		do_split = 0;
	}

out_unlock:
	spin_unlock(&pgd_lock);

	return do_split;
}

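/*
 * Worked example (illustrative numbers): with a 2M mapping (psize =
 * 0x200000, pmask = ~0x1fffff) and address = 0x1234000:
 *
 *	nextpage_addr = (0x1234000 + 0x200000) & pmask = 0x1400000
 *	numpages      = (0x1400000 - 0x1234000) >> 12  = 0x1cc (460)
 *
 * so at most 460 4K pages of the request fall inside this large page,
 * and cpa->numpages is clamped accordingly before the preserve checks.
 */
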
static int
__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
		   struct page *base)
{
	pte_t *pbase = (pte_t *)page_address(base);
	unsigned long ref_pfn, pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *tmp;
	pgprot_t ref_prot;

	spin_lock(&pgd_lock);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = _lookup_address_cpa(cpa, address, &level);
	if (tmp != kpte) {
		spin_unlock(&pgd_lock);
		return 1;
	}

	paravirt_alloc_pte(&init_mm, page_to_pfn(base));

	switch (level) {
	case PG_LEVEL_2M:
		ref_prot = pmd_pgprot(*(pmd_t *)kpte);
		/*
		 * Clear PSE (aka _PAGE_PAT) and move
		 * PAT bit to correct position.
		 */
		ref_prot = pgprot_large_2_4k(ref_prot);

		ref_pfn = pmd_pfn(*(pmd_t *)kpte);
		break;

	case PG_LEVEL_1G:
		ref_prot = pud_pgprot(*(pud_t *)kpte);
		ref_pfn = pud_pfn(*(pud_t *)kpte);
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;

		/*
		 * Clear the PSE flags if the PRESENT flag is not set,
		 * otherwise pmd_present/pmd_huge will return true
		 * even on a non present pmd.
		 */
		if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
			pgprot_val(ref_prot) &= ~_PAGE_PSE;
		break;

	default:
		spin_unlock(&pgd_lock);
		return 1;
	}

	ref_prot = pgprot_clear_protnone_bits(ref_prot);

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = ref_pfn;
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

	if (virt_addr_valid(address)) {
		unsigned long pfn = PFN_DOWN(__pa(address));

		if (pfn_range_is_mapped(pfn, pfn + 1))
			split_page_count(level);
	}

	/*
	 * Install the new, split up pagetable.
	 *
	 * We use the standard kernel pagetable protections for the new
	 * pagetable protections, the actual ptes set above control the
	 * primary protection behavior:
	 */
	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

	/*
	 * Intel Atom errata AAH41 workaround.
	 *
	 * The real fix should be in hw or in a microcode update, but
	 * we also probabilistically try to reduce the window of having
	 * a large TLB mixed with 4K TLBs while instruction fetches are
	 * going on.
	 */
	__flush_tlb_all();
	spin_unlock(&pgd_lock);

	return 0;
}

static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
			    unsigned long address)
{
	struct page *base;

	if (!debug_pagealloc_enabled())
		spin_unlock(&cpa_lock);
	base = alloc_pages(GFP_KERNEL, 0);
	if (!debug_pagealloc_enabled())
		spin_lock(&cpa_lock);
	if (!base)
		return -ENOMEM;

	if (__split_large_page(cpa, kpte, address, base))
		__free_page(base);

	return 0;
}

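/*
 * Illustrative note on the split geometry: a 2M entry becomes one page
 * of 512 PTEs covering consecutive pfns (pfninc = 1); a 1G entry becomes
 * 512 2M entries, so consecutive slots step pfninc = PMD_PAGE_SIZE >>
 * PAGE_SHIFT = 512 pfns apart. E.g. for a 1G page at pfn 0x40000:
 *
 *	pbase[0] -> pfn 0x40000, pbase[1] -> pfn 0x40200, ...
 */
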
static bool try_to_free_pte_page(pte_t *pte)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++)
		if (!pte_none(pte[i]))
			return false;

	free_page((unsigned long)pte);
	return true;
}

static bool try_to_free_pmd_page(pmd_t *pmd)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++)
		if (!pmd_none(pmd[i]))
			return false;

	free_page((unsigned long)pmd);
	return true;
}

static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, start);

	while (start < end) {
		set_pte(pte, __pte(0));

		start += PAGE_SIZE;
		pte++;
	}

	if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
		pmd_clear(pmd);
		return true;
	}
	return false;
}

static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
			      unsigned long start, unsigned long end)
{
	if (unmap_pte_range(pmd, start, end))
		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
			pud_clear(pud);
}

static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, start);

	/*
	 * Not on a 2MB page boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		__unmap_pmd_range(pud, pmd, start, pre_end);

		start = pre_end;
		pmd++;
	}

	/*
	 * Try to unmap in 2M chunks.
	 */
	while (end - start >= PMD_SIZE) {
		if (pmd_large(*pmd))
			pmd_clear(pmd);
		else
			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);

		start += PMD_SIZE;
		pmd++;
	}

	/*
	 * 4K leftovers?
	 */
	if (start < end)
		return __unmap_pmd_range(pud, pmd, start, end);

	/*
	 * Try again to free the PMD page if we haven't succeeded above.
	 */
	if (!pud_none(*pud))
		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
			pud_clear(pud);
}

static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
{
	pud_t *pud = pud_offset(p4d, start);

	/*
	 * Not on a GB page boundary?
	 */
	if (start & (PUD_SIZE - 1)) {
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		unmap_pmd_range(pud, start, pre_end);

		start = pre_end;
		pud++;
	}

	/*
	 * Try to unmap in 1G chunks?
	 */
	while (end - start >= PUD_SIZE) {

		if (pud_large(*pud))
			pud_clear(pud);
		else
			unmap_pmd_range(pud, start, start + PUD_SIZE);

		start += PUD_SIZE;
		pud++;
	}

	/*
	 * 2M leftovers?
	 */
	if (start < end)
		unmap_pmd_range(pud, start, end);

	/*
	 * No need to try to free the PUD page because we'll free it in
	 * populate_pgd's error path.
	 */
}

static int alloc_pte_page(pmd_t *pmd)
{
	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
	if (!pte)
		return -1;

	set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	return 0;
}

static int alloc_pmd_page(pud_t *pud)
{
	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
	if (!pmd)
		return -1;

	set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	return 0;
}

static void populate_pte(struct cpa_data *cpa,
			 unsigned long start, unsigned long end,
			 unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, start);

	pgprot = pgprot_clear_protnone_bits(pgprot);

	while (num_pages-- && start < end) {
		set_pte(pte, pfn_pte(cpa->pfn, pgprot));

		start += PAGE_SIZE;
		cpa->pfn++;
		pte++;
	}
}

static long populate_pmd(struct cpa_data *cpa,
			 unsigned long start, unsigned long end,
			 unsigned num_pages, pud_t *pud, pgprot_t pgprot)
{
	long cur_pages = 0;
	pmd_t *pmd;
	pgprot_t pmd_pgprot;

	/*
	 * Not on a 2M boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;

		pre_end = min_t(unsigned long, pre_end, next_page);
		cur_pages = (pre_end - start) >> PAGE_SHIFT;
		cur_pages = min_t(unsigned int, num_pages, cur_pages);

		/*
		 * Need a PTE page?
		 */
		pmd = pmd_offset(pud, start);
		if (pmd_none(*pmd))
			if (alloc_pte_page(pmd))
				return -1;

		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);

		start = pre_end;
	}

	/*
	 * We mapped them all?
	 */
	if (num_pages == cur_pages)
		return cur_pages;

	pmd_pgprot = pgprot_4k_2_large(pgprot);

	while (end - start >= PMD_SIZE) {

		/*
		 * We cannot use a 1G page so allocate a PMD page if needed.
		 */
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		pmd = pmd_offset(pud, start);

		set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
					canon_pgprot(pmd_pgprot))));

		start += PMD_SIZE;
		cpa->pfn += PMD_SIZE >> PAGE_SHIFT;
		cur_pages += PMD_SIZE >> PAGE_SHIFT;
	}

	/*
	 * Map trailing 4K pages.
	 */
	if (start < end) {
		pmd = pmd_offset(pud, start);
		if (pmd_none(*pmd))
			if (alloc_pte_page(pmd))
				return -1;

		populate_pte(cpa, start, end, num_pages - cur_pages,
			     pmd, pgprot);
	}
	return num_pages;
}

static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
			pgprot_t pgprot)
{
	pud_t *pud;
	unsigned long end;
	long cur_pages = 0;
	pgprot_t pud_pgprot;

	end = start + (cpa->numpages << PAGE_SHIFT);

	/*
	 * Not on a Gb page boundary? => map everything up to it with
	 * smaller pages.
	 */
	if (start & (PUD_SIZE - 1)) {
		unsigned long pre_end;
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;

		pre_end = min_t(unsigned long, end, next_page);
		cur_pages = (pre_end - start) >> PAGE_SHIFT;
		cur_pages = min_t(int, (int)cpa->numpages, cur_pages);

		pud = pud_offset(p4d, start);

		/*
		 * Need a PMD page?
		 */
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
					 pud, pgprot);
		if (cur_pages < 0)
			return cur_pages;

		start = pre_end;
	}

	/* We mapped them all? */
	if (cpa->numpages == cur_pages)
		return cur_pages;

	pud = pud_offset(p4d, start);
	pud_pgprot = pgprot_4k_2_large(pgprot);

	/*
	 * Map everything starting from the Gb boundary, possibly with 1G pages
	 */
	while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
		set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
				   canon_pgprot(pud_pgprot))));

		start += PUD_SIZE;
		cpa->pfn += PUD_SIZE >> PAGE_SHIFT;
		cur_pages += PUD_SIZE >> PAGE_SHIFT;
		pud++;
	}

	/* Map trailing leftover */
	if (start < end) {
		long tmp;

		pud = pud_offset(p4d, start);
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
				   pud, pgprot);
		if (tmp < 0)
			return cur_pages;

		cur_pages += tmp;
	}
	return cur_pages;
}

/*
 * Restrictions for kernel page table do not necessarily apply when mapping in
 * an alternate PGD.
 */
static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
{
	pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
	pud_t *pud = NULL;	/* shut up gcc */
	p4d_t *p4d;
	pgd_t *pgd_entry;
	long ret;

	pgd_entry = cpa->pgd + pgd_index(addr);

	if (pgd_none(*pgd_entry)) {
		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
		if (!p4d)
			return -1;

		set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE));
	}

	/*
	 * Allocate a PUD page and hand it down for mapping.
	 */
	p4d = p4d_offset(pgd_entry, addr);
	if (p4d_none(*p4d)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			return -1;

		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
	}

	pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);

	ret = populate_pud(cpa, addr, p4d, pgprot);
	if (ret < 0) {
		/*
		 * Leave the PUD page in place in case some other CPU or thread
		 * already found it, but remove any useless entries we just
		 * added to it.
		 */
		unmap_pud_range(p4d, addr,
				addr + (cpa->numpages << PAGE_SHIFT));
		return ret;
	}

	cpa->numpages = ret;
	return 0;
}

static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
			       int primary)
{
	if (cpa->pgd) {
		/*
		 * Right now, we only execute this code path when mapping
		 * the EFI virtual memory map regions, no other users
		 * provide a ->pgd value. This may change in the future.
		 */
		return populate_pgd(cpa, vaddr);
	}

	/*
	 * Ignore all non-primary paths.
	 */
	if (!primary) {
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
	 * to have holes.
	 * Also set numpages to '1', indicating that we processed the cpa
	 * request for one virtual address page and its pfn. TBD: numpages
	 * can be set based on the initial value and the level returned by
	 * lookup_address().
	 */
	if (within(vaddr, PAGE_OFFSET,
		   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
		cpa->numpages = 1;
		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
		return 0;

	} else if (__cpa_pfn_in_highmap(cpa->pfn)) {
		/* Faults in the highmap are OK, so do not warn: */
		return -EFAULT;
	} else {
		WARN(1, KERN_WARNING "CPA: called for zero pte. "
			"vaddr = %lx cpa->vaddr = %lx\n", vaddr,
			*cpa->vaddr);

		return -EFAULT;
	}
}

static int __change_page_attr(struct cpa_data *cpa, int primary)
{
	unsigned long address;
	int do_split, err;
	unsigned int level;
	pte_t *kpte, old_pte;

	if (cpa->flags & CPA_PAGES_ARRAY) {
		struct page *page = cpa->pages[cpa->curpage];
		if (unlikely(PageHighMem(page)))
			return 0;
		address = (unsigned long)page_address(page);
	} else if (cpa->flags & CPA_ARRAY)
		address = cpa->vaddr[cpa->curpage];
	else
		address = *cpa->vaddr;
repeat:
	kpte = _lookup_address_cpa(cpa, address, &level);
	if (!kpte)
		return __cpa_process_fault(cpa, address, primary);

	old_pte = *kpte;
	if (pte_none(old_pte))
		return __cpa_process_fault(cpa, address, primary);

	if (level == PG_LEVEL_4K) {
		pte_t new_pte;
		pgprot_t new_prot = pte_pgprot(old_pte);
		unsigned long pfn = pte_pfn(old_pte);

		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		new_prot = static_protections(new_prot, address, pfn);

		new_prot = pgprot_clear_protnone_bits(new_prot);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change its attributes,
		 * not the memory it points to.
		 */
		new_pte = pfn_pte(pfn, new_prot);
		cpa->pfn = pfn;
		/*
		 * Do we really change anything?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_atomic(kpte, new_pte);
			cpa->flags |= CPA_FLUSHTLB;
		}
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Check, whether we can keep the large page intact
	 * and just change the pte:
	 */
	do_split = try_preserve_large_page(kpte, address, cpa);
	/*
	 * When the range fits into the existing large page,
	 * return. cpa->numpages and cpa->flags have been updated in
	 * try_preserve_large_page():
	 */
	if (do_split <= 0)
		return do_split;

	/*
	 * We have to split the large page:
	 */
	err = split_large_page(cpa, kpte, address);
	if (!err) {
		/*
		 * Do a global flush tlb after splitting the large page
		 * and before we do the actual change page attribute in the PTE.
		 *
		 * Without this, we violate the TLB application note, which says
		 * "The TLBs may contain both ordinary and large-page
		 * translations for a 4-KByte range of linear addresses. This
		 * may occur if software modifies the paging structures so that
		 * the page size used for the address range changes. If the two
		 * translations differ with respect to page frame or attributes
		 * (e.g., permissions), processor behavior is undefined and may
		 * be implementation-specific."
		 *
		 * We do this global tlb flush inside the cpa_lock, so that we
		 * don't allow any other cpu, with stale tlb entries, to change
		 * the page attribute in parallel for an address that also
		 * falls into the just split large page entry.
		 */
		flush_tlb_all();
		goto repeat;
	}

	return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
	struct cpa_data alias_cpa;
	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
	unsigned long vaddr;
	int ret;

	if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
		return 0;

	/*
	 * No need to redo, when the primary call touched the direct
	 * mapping already:
	 */
	if (cpa->flags & CPA_PAGES_ARRAY) {
		struct page *page = cpa->pages[cpa->curpage];
		if (unlikely(PageHighMem(page)))
			return 0;
		vaddr = (unsigned long)page_address(page);
	} else if (cpa->flags & CPA_ARRAY)
		vaddr = cpa->vaddr[cpa->curpage];
	else
		vaddr = *cpa->vaddr;

	if (!(within(vaddr, PAGE_OFFSET,
		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

		alias_cpa = *cpa;
		alias_cpa.vaddr = &laddr;
		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

		ret = __change_page_attr_set_clr(&alias_cpa, 0);
		if (ret)
			return ret;
	}

#ifdef CONFIG_X86_64
	/*
	 * If the primary call didn't touch the high mapping already
	 * and the physical address is inside the kernel map, we need
	 * to touch the high mapped kernel as well:
	 */
	if (!within(vaddr, (unsigned long)_text, _brk_end) &&
	    __cpa_pfn_in_highmap(cpa->pfn)) {
		unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
					       __START_KERNEL_map - phys_base;
		alias_cpa = *cpa;
		alias_cpa.vaddr = &temp_cpa_vaddr;
		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

		/*
		 * The high mapping range is imprecise, so ignore the
		 * return value.
		 */
		__change_page_attr_set_clr(&alias_cpa, 0);
	}
#endif

	return 0;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
	unsigned long numpages = cpa->numpages;
	int ret;

	while (numpages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = numpages;
		/* for array changes, we can't use large page */
		if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
			cpa->numpages = 1;

		if (!debug_pagealloc_enabled())
			spin_lock(&cpa_lock);
		ret = __change_page_attr(cpa, checkalias);
		if (!debug_pagealloc_enabled())
			spin_unlock(&cpa_lock);
		if (ret)
			return ret;

		if (checkalias) {
			ret = cpa_process_alias(cpa);
			if (ret)
				return ret;
		}

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->numpages > numpages || !cpa->numpages);
		numpages -= cpa->numpages;
		if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
			cpa->curpage++;
		else
			*cpa->vaddr += cpa->numpages * PAGE_SIZE;

	}
	return 0;
}

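/*
 * Worked example (illustrative): a request for 1536 pages (6 MB) starting
 * at a 2M-aligned direct-map address could be consumed by the loop above
 * in three iterations, each preserving one whole 2M large page:
 *
 *	iteration 1: cpa->numpages = 512, numpages 1536 -> 1024
 *	iteration 2: cpa->numpages = 512, numpages 1024 ->  512
 *	iteration 3: cpa->numpages = 512, numpages  512 ->    0
 *
 * An unaligned start would instead force a split and smaller steps.
 */
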
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr,
				    int force_split, int in_flag,
				    struct page **pages)
{
	struct cpa_data cpa;
	int ret, cache, checkalias;
	unsigned long baddr = 0;

	memset(&cpa, 0, sizeof(cpa));

	/*
	 * Check whether we are requested to set an unsupported
	 * feature. Clearing unsupported features is OK.
	 */
	mask_set = canon_pgprot(mask_set);

	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
		return 0;

	/* Ensure we are PAGE_SIZE aligned */
	if (in_flag & CPA_ARRAY) {
		int i;
		for (i = 0; i < numpages; i++) {
			if (addr[i] & ~PAGE_MASK) {
				addr[i] &= PAGE_MASK;
				WARN_ON_ONCE(1);
			}
		}
	} else if (!(in_flag & CPA_PAGES_ARRAY)) {
		/*
		 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
		 * No need to check in that case.
		 */
		if (*addr & ~PAGE_MASK) {
			*addr &= PAGE_MASK;
			/*
			 * People should not be passing in unaligned addresses:
			 */
			WARN_ON_ONCE(1);
		}
		/*
		 * Save address for cache flush. *addr is modified in the call
		 * to __change_page_attr_set_clr() below.
		 */
		baddr = *addr;
	}

	/* Must avoid aliasing mappings in the highmem code */
	kmap_flush_unused();

	vm_unmap_aliases();

	cpa.vaddr = addr;
	cpa.pages = pages;
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
	cpa.flags = 0;
	cpa.curpage = 0;
	cpa.force_split = force_split;

	if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
		cpa.flags |= in_flag;

	/* No alias checking for _NX bit modifications */
	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
	/* Has caller explicitly disabled alias checking? */
	if (in_flag & CPA_NO_CHECK_ALIAS)
		checkalias = 0;

	ret = __change_page_attr_set_clr(&cpa, checkalias);

	/*
	 * Check whether we really changed something:
	 */
	if (!(cpa.flags & CPA_FLUSHTLB))
		goto out;

	/*
	 * No need to flush, when we did not set any of the caching
	 * attributes:
	 */
	cache = !!pgprot2cachemode(mask_set);

	/*
	 * On success we use CLFLUSH when the CPU supports it, to
	 * avoid the WBINVD. If the CPU does not support CLFLUSH, and
	 * in the error case, we fall back to cpa_flush_all() (which
	 * uses WBINVD):
	 */
	if (!ret && boot_cpu_has(X86_FEATURE_CLFLUSH)) {
		if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
			cpa_flush_array(addr, numpages, cache,
					cpa.flags, pages);
		} else
			cpa_flush_range(baddr, numpages, cache);
	} else
		cpa_flush_all(cache);

out:
	return ret;
}

static inline int change_page_attr_set(unsigned long *addr, int numpages,
				       pgprot_t mask, int array)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
		(array ? CPA_ARRAY : 0), NULL);
}

static inline int change_page_attr_clear(unsigned long *addr, int numpages,
					 pgprot_t mask, int array)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
		(array ? CPA_ARRAY : 0), NULL);
}

static inline int cpa_set_pages_array(struct page **pages, int numpages,
				      pgprot_t mask)
{
	return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
		CPA_PAGES_ARRAY, pages);
}

static inline int cpa_clear_pages_array(struct page **pages, int numpages,
					pgprot_t mask)
{
	return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
		CPA_PAGES_ARRAY, pages);
}

1219333d | 1555 | int _set_memory_uc(unsigned long addr, int numpages) |
72932c7a | 1556 | { |
de33c442 SS |
1557 | /* |
1558 | * For now use UC-; see the comments in ioremap_nocache(). |
e4b6be33 LR |
1559 | * If you really need strong UC, use ioremap_uc(), but note |
1560 | * that you cannot override IO areas with set_memory_*(), as |
1561 | * these helpers cannot work with IO memory. |
de33c442 | 1562 | */ |
d75586ad | 1563 | return change_page_attr_set(&addr, numpages, |
c06814d8 JG |
1564 | cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS), |
1565 | 0); | |
75cbade8 | 1566 | } |
1219333d | 1567 | |
1568 | int set_memory_uc(unsigned long addr, int numpages) | |
1569 | { | |
9fa3ab39 | 1570 | int ret; |
1571 | ||
de33c442 SS |
1572 | /* |
1573 | * For now use UC-; see the comments in ioremap_nocache(). |
1574 | */ | |
9fa3ab39 | 1575 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, |
e00c8cc9 | 1576 | _PAGE_CACHE_MODE_UC_MINUS, NULL); |
9fa3ab39 | 1577 | if (ret) |
1578 | goto out_err; | |
1579 | ||
1580 | ret = _set_memory_uc(addr, numpages); | |
1581 | if (ret) | |
1582 | goto out_free; | |
1583 | ||
1584 | return 0; | |
1219333d | 1585 | |
9fa3ab39 | 1586 | out_free: |
1587 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); | |
1588 | out_err: | |
1589 | return ret; | |
1219333d | 1590 | } |
75cbade8 AV |
1591 | EXPORT_SYMBOL(set_memory_uc); |
1592 | ||
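/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the usual driver-side pairing. set_memory_uc() reserves the range in
 * the PAT memtype tree; set_memory_wb() both restores caching and
 * releases that reservation, so the two must cover the same range.
 */
static inline int example_uncached_window(void *buf, int numpages)
{
	unsigned long addr = (unsigned long)buf;
	int ret;

	ret = set_memory_uc(addr, numpages);
	if (ret)
		return ret;
	/* ... perform uncached accesses to buf here ... */
	return set_memory_wb(addr, numpages);
}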
2d070eff | 1593 | static int _set_memory_array(unsigned long *addr, int addrinarray, |
c06814d8 | 1594 | enum page_cache_mode new_type) |
d75586ad | 1595 | { |
623dffb2 | 1596 | enum page_cache_mode set_type; |
9fa3ab39 | 1597 | int i, j; |
1598 | int ret; | |
1599 | ||
d75586ad | 1600 | for (i = 0; i < addrinarray; i++) { |
9fa3ab39 | 1601 | ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE, |
4f646254 | 1602 | new_type, NULL); |
9fa3ab39 | 1603 | if (ret) |
1604 | goto out_free; | |
d75586ad SL |
1605 | } |
1606 | ||
623dffb2 TK |
1607 | /* If WC, set to UC- first and then WC */ |
1608 | set_type = (new_type == _PAGE_CACHE_MODE_WC) ? | |
1609 | _PAGE_CACHE_MODE_UC_MINUS : new_type; | |
1610 | ||
9fa3ab39 | 1611 | ret = change_page_attr_set(addr, addrinarray, |
623dffb2 | 1612 | cachemode2pgprot(set_type), 1); |
4f646254 | 1613 | |
c06814d8 | 1614 | if (!ret && new_type == _PAGE_CACHE_MODE_WC) |
4f646254 | 1615 | ret = change_page_attr_set_clr(addr, addrinarray, |
c06814d8 JG |
1616 | cachemode2pgprot( |
1617 | _PAGE_CACHE_MODE_WC), | |
4f646254 PN |
1618 | __pgprot(_PAGE_CACHE_MASK), |
1619 | 0, CPA_ARRAY, NULL); | |
9fa3ab39 | 1620 | if (ret) |
1621 | goto out_free; | |
1622 | ||
1623 | return 0; | |
1624 | ||
1625 | out_free: | |
1626 | for (j = 0; j < i; j++) | |
1627 | free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE); | |
1628 | ||
1629 | return ret; | |
d75586ad | 1630 | } |
4f646254 PN |
1631 | |
1632 | int set_memory_array_uc(unsigned long *addr, int addrinarray) | |
1633 | { | |
c06814d8 | 1634 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_UC_MINUS); |
4f646254 | 1635 | } |
d75586ad SL |
1636 | EXPORT_SYMBOL(set_memory_array_uc); |
1637 | ||
4f646254 PN |
1638 | int set_memory_array_wc(unsigned long *addr, int addrinarray) |
1639 | { | |
c06814d8 | 1640 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WC); |
4f646254 PN |
1641 | } |
1642 | EXPORT_SYMBOL(set_memory_array_wc); | |
1643 | ||
623dffb2 TK |
1644 | int set_memory_array_wt(unsigned long *addr, int addrinarray) |
1645 | { | |
1646 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WT); | |
1647 | } | |
1648 | EXPORT_SYMBOL_GPL(set_memory_array_wt); | |
1649 | ||
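/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the *_array variants take a vector of discontiguous virtual addresses
 * and change them in one CPA walk with one flush, instead of paying a
 * TLB/cache flush per page as a loop over set_memory_uc() would.
 */
static inline int example_uncache_scattered(unsigned long *vaddrs, int count)
{
	int ret = set_memory_array_uc(vaddrs, count);

	if (ret)
		return ret;
	/* ... use the pages uncached ... */
	return set_memory_array_wb(vaddrs, count);
}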
ef354af4 | 1650 | int _set_memory_wc(unsigned long addr, int numpages) |
1651 | { | |
3869c4aa | 1652 | int ret; |
bdc6340f PV |
1653 | unsigned long addr_copy = addr; |
1654 | ||
3869c4aa | 1655 | ret = change_page_attr_set(&addr, numpages, |
c06814d8 JG |
1656 | cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS), |
1657 | 0); | |
3869c4aa | 1658 | if (!ret) { |
bdc6340f | 1659 | ret = change_page_attr_set_clr(&addr_copy, numpages, |
c06814d8 JG |
1660 | cachemode2pgprot( |
1661 | _PAGE_CACHE_MODE_WC), | |
bdc6340f PV |
1662 | __pgprot(_PAGE_CACHE_MASK), |
1663 | 0, 0, NULL); | |
3869c4aa | 1664 | } |
1665 | return ret; | |
ef354af4 | 1666 | } |
1667 | ||
1668 | int set_memory_wc(unsigned long addr, int numpages) | |
1669 | { | |
9fa3ab39 | 1670 | int ret; |
1671 | ||
9fa3ab39 | 1672 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, |
e00c8cc9 | 1673 | _PAGE_CACHE_MODE_WC, NULL); |
9fa3ab39 | 1674 | if (ret) |
623dffb2 | 1675 | return ret; |
ef354af4 | 1676 | |
9fa3ab39 | 1677 | ret = _set_memory_wc(addr, numpages); |
1678 | if (ret) | |
623dffb2 | 1679 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); |
9fa3ab39 | 1680 | |
9fa3ab39 | 1681 | return ret; |
ef354af4 | 1682 | } |
1683 | EXPORT_SYMBOL(set_memory_wc); | |
1684 | ||
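/*
 * Illustrative sketch (editor's example, not part of the original file):
 * write-combining suits streaming stores into a framebuffer-like buffer.
 * Note the two-step transition in _set_memory_wc() above: the range goes
 * to UC- first and only then to WC, which avoids an intermediate state
 * where a cacheable mapping aliases the new WC one.
 */
static inline int example_stream_wc(unsigned long addr, int numpages)
{
	int ret = set_memory_wc(addr, numpages);

	if (ret)
		return ret;
	/* ... issue streaming stores into the buffer ... */
	return set_memory_wb(addr, numpages);	/* restores WB, frees memtype */
}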
623dffb2 TK |
1685 | int _set_memory_wt(unsigned long addr, int numpages) |
1686 | { | |
1687 | return change_page_attr_set(&addr, numpages, | |
1688 | cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0); | |
1689 | } | |
1690 | ||
1691 | int set_memory_wt(unsigned long addr, int numpages) | |
1692 | { | |
1693 | int ret; | |
1694 | ||
1695 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, | |
1696 | _PAGE_CACHE_MODE_WT, NULL); | |
1697 | if (ret) | |
1698 | return ret; | |
1699 | ||
1700 | ret = _set_memory_wt(addr, numpages); | |
1701 | if (ret) | |
1702 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); | |
1703 | ||
1704 | return ret; | |
1705 | } | |
1706 | EXPORT_SYMBOL_GPL(set_memory_wt); | |
1707 | ||
1219333d | 1708 | int _set_memory_wb(unsigned long addr, int numpages) |
75cbade8 | 1709 | { |
c06814d8 | 1710 | /* WB cache mode is hard wired to all cache attribute bits being 0 */ |
d75586ad SL |
1711 | return change_page_attr_clear(&addr, numpages, |
1712 | __pgprot(_PAGE_CACHE_MASK), 0); | |
75cbade8 | 1713 | } |
1219333d | 1714 | |
1715 | int set_memory_wb(unsigned long addr, int numpages) | |
1716 | { | |
9fa3ab39 | 1717 | int ret; |
1718 | ||
1719 | ret = _set_memory_wb(addr, numpages); | |
1720 | if (ret) | |
1721 | return ret; | |
1722 | ||
c15238df | 1723 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); |
9fa3ab39 | 1724 | return 0; |
1219333d | 1725 | } |
75cbade8 AV |
1726 | EXPORT_SYMBOL(set_memory_wb); |
1727 | ||
d75586ad SL |
1728 | int set_memory_array_wb(unsigned long *addr, int addrinarray) |
1729 | { | |
1730 | int i; | |
a5593e0b | 1731 | int ret; |
1732 | ||
c06814d8 | 1733 | /* WB cache mode is hard wired to all cache attribute bits being 0 */ |
a5593e0b | 1734 | ret = change_page_attr_clear(addr, addrinarray, |
1735 | __pgprot(_PAGE_CACHE_MASK), 1); | |
9fa3ab39 | 1736 | if (ret) |
1737 | return ret; | |
d75586ad | 1738 | |
9fa3ab39 | 1739 | for (i = 0; i < addrinarray; i++) |
1740 | free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE); | |
c5e147cf | 1741 | |
9fa3ab39 | 1742 | return 0; |
d75586ad SL |
1743 | } |
1744 | EXPORT_SYMBOL(set_memory_array_wb); | |
1745 | ||
75cbade8 AV |
1746 | int set_memory_x(unsigned long addr, int numpages) |
1747 | { | |
583140af PA |
1748 | if (!(__supported_pte_mask & _PAGE_NX)) |
1749 | return 0; | |
1750 | ||
d75586ad | 1751 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0); |
75cbade8 AV |
1752 | } |
1753 | EXPORT_SYMBOL(set_memory_x); | |
1754 | ||
1755 | int set_memory_nx(unsigned long addr, int numpages) | |
1756 | { | |
583140af PA |
1757 | if (!(__supported_pte_mask & _PAGE_NX)) |
1758 | return 0; | |
1759 | ||
d75586ad | 1760 | return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0); |
75cbade8 AV |
1761 | } |
1762 | EXPORT_SYMBOL(set_memory_nx); | |
1763 | ||
1764 | int set_memory_ro(unsigned long addr, int numpages) | |
1765 | { | |
d75586ad | 1766 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0); |
75cbade8 | 1767 | } |
75cbade8 AV |
1768 | |
1769 | int set_memory_rw(unsigned long addr, int numpages) | |
1770 | { | |
d75586ad | 1771 | return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0); |
75cbade8 | 1772 | } |
f62d0f00 IM |
1773 | |
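/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the classic W^X sequence for generated code. Write the instructions
 * while the pages are RW, then drop write and clear NX in two calls.
 */
static inline int example_seal_generated_code(unsigned long text, int numpages)
{
	int ret = set_memory_ro(text, numpages);

	if (ret)
		return ret;
	return set_memory_x(text, numpages);
}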
1774 | int set_memory_np(unsigned long addr, int numpages) | |
1775 | { | |
d75586ad | 1776 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0); |
f62d0f00 | 1777 | } |
75cbade8 | 1778 | |
c40a56a7 DH |
1779 | int set_memory_np_noalias(unsigned long addr, int numpages) |
1780 | { | |
1781 | int cpa_flags = CPA_NO_CHECK_ALIAS; | |
1782 | ||
1783 | return change_page_attr_set_clr(&addr, numpages, __pgprot(0), | |
1784 | __pgprot(_PAGE_PRESENT), 0, | |
1785 | cpa_flags, NULL); | |
1786 | } | |
1787 | ||
c9caa02c AK |
1788 | int set_memory_4k(unsigned long addr, int numpages) |
1789 | { | |
d75586ad | 1790 | return change_page_attr_set_clr(&addr, numpages, __pgprot(0), |
9ae28475 | 1791 | __pgprot(0), 1, 0, NULL); |
c9caa02c AK |
1792 | } |
1793 | ||
39114b7a DH |
1794 | int set_memory_nonglobal(unsigned long addr, int numpages) |
1795 | { | |
1796 | return change_page_attr_clear(&addr, numpages, | |
1797 | __pgprot(_PAGE_GLOBAL), 0); | |
1798 | } | |
1799 | ||
eac7073a DH |
1800 | int set_memory_global(unsigned long addr, int numpages) |
1801 | { | |
1802 | return change_page_attr_set(&addr, numpages, | |
1803 | __pgprot(_PAGE_GLOBAL), 0); | |
1804 | } | |
1805 | ||
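/*
 * Illustrative sketch (editor's example, not part of the original file):
 * page-table-isolation style usage. Clearing _PAGE_GLOBAL on a kernel
 * mapping ensures its TLB entries are dropped on a CR3 switch instead
 * of surviving into another address space.
 */
static inline int example_drop_global(unsigned long addr, int numpages)
{
	return set_memory_nonglobal(addr, numpages);
}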
77bd2342 TL |
1806 | static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) |
1807 | { | |
1808 | struct cpa_data cpa; | |
1809 | unsigned long start; | |
1810 | int ret; | |
1811 | ||
a72ec5a3 TL |
1812 | /* Nothing to do if memory encryption is not active */ |
1813 | if (!mem_encrypt_active()) | |
77bd2342 TL |
1814 | return 0; |
1815 | ||
1816 | /* Should not be working on unaligned addresses */ | |
1817 | if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr)) | |
1818 | addr &= PAGE_MASK; | |
1819 | ||
1820 | start = addr; | |
1821 | ||
1822 | memset(&cpa, 0, sizeof(cpa)); | |
1823 | cpa.vaddr = &addr; | |
1824 | cpa.numpages = numpages; | |
1825 | cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0); | |
1826 | cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC); | |
1827 | cpa.pgd = init_mm.pgd; | |
1828 | ||
1829 | /* Must avoid aliasing mappings in the highmem code */ | |
1830 | kmap_flush_unused(); | |
1831 | vm_unmap_aliases(); | |
1832 | ||
1833 | /* | |
1834 | * Before changing the encryption attribute, we need to flush caches. | |
1835 | */ | |
1836 | if (static_cpu_has(X86_FEATURE_CLFLUSH)) | |
1837 | cpa_flush_range(start, numpages, 1); | |
1838 | else | |
1839 | cpa_flush_all(1); | |
1840 | ||
1841 | ret = __change_page_attr_set_clr(&cpa, 1); | |
1842 | ||
1843 | /* | |
1844 | * After changing the encryption attribute, we need to flush TLBs | |
1845 | * again in case any speculative TLB caching occurred (but no need | |
1846 | * to flush caches again). We could just use cpa_flush_all(), but | |
1847 | * in case TLB flushing ever gets optimized in the cpa_flush_range() |
1848 | * path, use the same logic as above. |
1849 | */ | |
1850 | if (static_cpu_has(X86_FEATURE_CLFLUSH)) | |
1851 | cpa_flush_range(start, numpages, 0); | |
1852 | else | |
1853 | cpa_flush_all(0); | |
1854 | ||
1855 | return ret; | |
1856 | } | |
1857 | ||
1858 | int set_memory_encrypted(unsigned long addr, int numpages) | |
1859 | { | |
1860 | return __set_memory_enc_dec(addr, numpages, true); | |
1861 | } | |
95cf9264 | 1862 | EXPORT_SYMBOL_GPL(set_memory_encrypted); |
77bd2342 TL |
1863 | |
1864 | int set_memory_decrypted(unsigned long addr, int numpages) | |
1865 | { | |
1866 | return __set_memory_enc_dec(addr, numpages, false); | |
1867 | } | |
95cf9264 | 1868 | EXPORT_SYMBOL_GPL(set_memory_decrypted); |
77bd2342 | 1869 | |
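/*
 * Illustrative sketch (editor's example, not part of the original file):
 * under SME/SEV a driver that shares a buffer with a device or with the
 * hypervisor clears the C-bit on its mapping first, and re-encrypts the
 * range before handing the memory back to the allocator.
 */
static inline int example_share_buffer(void *buf, int numpages)
{
	return set_memory_decrypted((unsigned long)buf, numpages);
	/* ... later: set_memory_encrypted((unsigned long)buf, numpages); */
}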
75cbade8 AV |
1870 | int set_pages_uc(struct page *page, int numpages) |
1871 | { | |
1872 | unsigned long addr = (unsigned long)page_address(page); | |
75cbade8 | 1873 | |
d7c8f21a | 1874 | return set_memory_uc(addr, numpages); |
75cbade8 AV |
1875 | } |
1876 | EXPORT_SYMBOL(set_pages_uc); | |
1877 | ||
4f646254 | 1878 | static int _set_pages_array(struct page **pages, int addrinarray, |
c06814d8 | 1879 | enum page_cache_mode new_type) |
0f350755 | 1880 | { |
1881 | unsigned long start; | |
1882 | unsigned long end; | |
623dffb2 | 1883 | enum page_cache_mode set_type; |
0f350755 | 1884 | int i; |
1885 | int free_idx; | |
4f646254 | 1886 | int ret; |
0f350755 | 1887 | |
1888 | for (i = 0; i < addrinarray; i++) { | |
8523acfe TH |
1889 | if (PageHighMem(pages[i])) |
1890 | continue; | |
1891 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; | |
0f350755 | 1892 | end = start + PAGE_SIZE; |
4f646254 | 1893 | if (reserve_memtype(start, end, new_type, NULL)) |
0f350755 | 1894 | goto err_out; |
1895 | } | |
1896 | ||
623dffb2 TK |
1897 | /* If WC, set to UC- first and then WC */ |
1898 | set_type = (new_type == _PAGE_CACHE_MODE_WC) ? | |
1899 | _PAGE_CACHE_MODE_UC_MINUS : new_type; | |
1900 | ||
4f646254 | 1901 | ret = cpa_set_pages_array(pages, addrinarray, |
623dffb2 | 1902 | cachemode2pgprot(set_type)); |
c06814d8 | 1903 | if (!ret && new_type == _PAGE_CACHE_MODE_WC) |
4f646254 | 1904 | ret = change_page_attr_set_clr(NULL, addrinarray, |
c06814d8 JG |
1905 | cachemode2pgprot( |
1906 | _PAGE_CACHE_MODE_WC), | |
4f646254 PN |
1907 | __pgprot(_PAGE_CACHE_MASK), |
1908 | 0, CPA_PAGES_ARRAY, pages); | |
1909 | if (ret) | |
1910 | goto err_out; | |
1911 | return 0; /* Success */ | |
0f350755 | 1912 | err_out: |
1913 | free_idx = i; | |
1914 | for (i = 0; i < free_idx; i++) { | |
8523acfe TH |
1915 | if (PageHighMem(pages[i])) |
1916 | continue; | |
1917 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; | |
0f350755 | 1918 | end = start + PAGE_SIZE; |
1919 | free_memtype(start, end); | |
1920 | } | |
1921 | return -EINVAL; | |
1922 | } | |
4f646254 PN |
1923 | |
1924 | int set_pages_array_uc(struct page **pages, int addrinarray) | |
1925 | { | |
c06814d8 | 1926 | return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS); |
4f646254 | 1927 | } |
0f350755 | 1928 | EXPORT_SYMBOL(set_pages_array_uc); |
1929 | ||
4f646254 PN |
1930 | int set_pages_array_wc(struct page **pages, int addrinarray) |
1931 | { | |
c06814d8 | 1932 | return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC); |
4f646254 PN |
1933 | } |
1934 | EXPORT_SYMBOL(set_pages_array_wc); | |
1935 | ||
623dffb2 TK |
1936 | int set_pages_array_wt(struct page **pages, int addrinarray) |
1937 | { | |
1938 | return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WT); | |
1939 | } | |
1940 | EXPORT_SYMBOL_GPL(set_pages_array_wt); | |
1941 | ||
75cbade8 AV |
1942 | int set_pages_wb(struct page *page, int numpages) |
1943 | { | |
1944 | unsigned long addr = (unsigned long)page_address(page); | |
75cbade8 | 1945 | |
d7c8f21a | 1946 | return set_memory_wb(addr, numpages); |
75cbade8 AV |
1947 | } |
1948 | EXPORT_SYMBOL(set_pages_wb); | |
1949 | ||
0f350755 | 1950 | int set_pages_array_wb(struct page **pages, int addrinarray) |
1951 | { | |
1952 | int retval; | |
1953 | unsigned long start; | |
1954 | unsigned long end; | |
1955 | int i; | |
1956 | ||
c06814d8 | 1957 | /* WB cache mode is hard wired to all cache attribute bits being 0 */ |
0f350755 | 1958 | retval = cpa_clear_pages_array(pages, addrinarray, |
1959 | __pgprot(_PAGE_CACHE_MASK)); | |
9fa3ab39 | 1960 | if (retval) |
1961 | return retval; | |
0f350755 | 1962 | |
1963 | for (i = 0; i < addrinarray; i++) { | |
8523acfe TH |
1964 | if (PageHighMem(pages[i])) |
1965 | continue; | |
1966 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; | |
0f350755 | 1967 | end = start + PAGE_SIZE; |
1968 | free_memtype(start, end); | |
1969 | } | |
1970 | ||
9fa3ab39 | 1971 | return 0; |
0f350755 | 1972 | } |
1973 | EXPORT_SYMBOL(set_pages_array_wb); | |
1974 | ||
75cbade8 AV |
1975 | int set_pages_x(struct page *page, int numpages) |
1976 | { | |
1977 | unsigned long addr = (unsigned long)page_address(page); | |
75cbade8 | 1978 | |
d7c8f21a | 1979 | return set_memory_x(addr, numpages); |
75cbade8 AV |
1980 | } |
1981 | EXPORT_SYMBOL(set_pages_x); | |
1982 | ||
1983 | int set_pages_nx(struct page *page, int numpages) | |
1984 | { | |
1985 | unsigned long addr = (unsigned long)page_address(page); | |
75cbade8 | 1986 | |
d7c8f21a | 1987 | return set_memory_nx(addr, numpages); |
75cbade8 AV |
1988 | } |
1989 | EXPORT_SYMBOL(set_pages_nx); | |
1990 | ||
1991 | int set_pages_ro(struct page *page, int numpages) | |
1992 | { | |
1993 | unsigned long addr = (unsigned long)page_address(page); | |
75cbade8 | 1994 | |
d7c8f21a | 1995 | return set_memory_ro(addr, numpages); |
75cbade8 | 1996 | } |
75cbade8 AV |
1997 | |
1998 | int set_pages_rw(struct page *page, int numpages) | |
1999 | { | |
2000 | unsigned long addr = (unsigned long)page_address(page); | |
e81d5dc4 | 2001 | |
d7c8f21a | 2002 | return set_memory_rw(addr, numpages); |
78c94aba IM |
2003 | } |
2004 | ||
1da177e4 | 2005 | #ifdef CONFIG_DEBUG_PAGEALLOC |
f62d0f00 IM |
2006 | |
2007 | static int __set_pages_p(struct page *page, int numpages) | |
2008 | { | |
d75586ad SL |
2009 | unsigned long tempaddr = (unsigned long) page_address(page); |
2010 | struct cpa_data cpa = { .vaddr = &tempaddr, | |
82f0712c | 2011 | .pgd = NULL, |
72e458df TG |
2012 | .numpages = numpages, |
2013 | .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW), | |
d75586ad SL |
2014 | .mask_clr = __pgprot(0), |
2015 | .flags = 0}; | |
72932c7a | 2016 | |
55121b43 SS |
2017 | /* |
2018 | * No alias checking is needed for setting the present flag; otherwise |
2019 | * we may need to break up large pages for the 64-bit kernel text |
2020 | * mappings (which adds complexity, especially if we want to do |
2021 | * this from atomic context). Let's keep it simple! |
2022 | */ | |
2023 | return __change_page_attr_set_clr(&cpa, 0); | |
f62d0f00 IM |
2024 | } |
2025 | ||
2026 | static int __set_pages_np(struct page *page, int numpages) | |
2027 | { | |
d75586ad SL |
2028 | unsigned long tempaddr = (unsigned long) page_address(page); |
2029 | struct cpa_data cpa = { .vaddr = &tempaddr, | |
82f0712c | 2030 | .pgd = NULL, |
72e458df TG |
2031 | .numpages = numpages, |
2032 | .mask_set = __pgprot(0), | |
d75586ad SL |
2033 | .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW), |
2034 | .flags = 0}; | |
72932c7a | 2035 | |
55121b43 SS |
2036 | /* |
2037 | * No alias checking is needed for clearing the present flag; otherwise |
2038 | * we may need to break up large pages for the 64-bit kernel text |
2039 | * mappings (which adds complexity, especially if we want to do |
2040 | * this from atomic context). Let's keep it simple! |
2041 | */ | |
2042 | return __change_page_attr_set_clr(&cpa, 0); | |
f62d0f00 IM |
2043 | } |
2044 | ||
031bc574 | 2045 | void __kernel_map_pages(struct page *page, int numpages, int enable) |
1da177e4 LT |
2046 | { |
2047 | if (PageHighMem(page)) | |
2048 | return; | |
9f4c815c | 2049 | if (!enable) { |
f9b8404c IM |
2050 | debug_check_no_locks_freed(page_address(page), |
2051 | numpages * PAGE_SIZE); | |
9f4c815c | 2052 | } |
de5097c2 | 2053 | |
9f4c815c | 2054 | /* |
f8d8406b | 2055 | * The return value is ignored, as the calls cannot fail. |
55121b43 SS |
2056 | * Large pages are not used for identity mappings at boot time, |
2057 | * so no memory allocation happens when splitting a large page. |
1da177e4 | 2058 | */ |
f62d0f00 IM |
2059 | if (enable) |
2060 | __set_pages_p(page, numpages); | |
2061 | else | |
2062 | __set_pages_np(page, numpages); | |
9f4c815c IM |
2063 | |
2064 | /* | |
e4b71dcf IM |
2065 | * We should perform an IPI and flush all tlbs, |
2066 | * but that can deadlock->flush only current cpu: | |
1da177e4 LT |
2067 | */ |
2068 | __flush_tlb_all(); | |
26564600 BO |
2069 | |
2070 | arch_flush_lazy_mmu_mode(); | |
ee7ae7a1 TG |
2071 | } |
2072 | ||
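/*
 * Illustrative sketch (editor's example, not part of the original file):
 * with CONFIG_DEBUG_PAGEALLOC the page allocator calls this hook so that
 * freed pages are unmapped and any use-after-free faults immediately.
 */
static inline void example_poison_free_pages(struct page *page, int order)
{
	__kernel_map_pages(page, 1 << order, 0);	/* 0: clear P and RW */
}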
8a235efa RW |
2073 | #ifdef CONFIG_HIBERNATION |
2074 | ||
2075 | bool kernel_page_present(struct page *page) | |
2076 | { | |
2077 | unsigned int level; | |
2078 | pte_t *pte; | |
2079 | ||
2080 | if (PageHighMem(page)) | |
2081 | return false; | |
2082 | ||
2083 | pte = lookup_address((unsigned long)page_address(page), &level); | |
2084 | return (pte_val(*pte) & _PAGE_PRESENT); | |
2085 | } | |
2086 | ||
2087 | #endif /* CONFIG_HIBERNATION */ | |
2088 | ||
2089 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | |
d1028a15 | 2090 | |
82f0712c BP |
2091 | int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, |
2092 | unsigned numpages, unsigned long page_flags) | |
2093 | { | |
2094 | int retval = -EINVAL; | |
2095 | ||
2096 | struct cpa_data cpa = { | |
2097 | .vaddr = &address, | |
2098 | .pfn = pfn, | |
2099 | .pgd = pgd, | |
2100 | .numpages = numpages, | |
2101 | .mask_set = __pgprot(0), | |
2102 | .mask_clr = __pgprot(0), | |
2103 | .flags = 0, | |
2104 | }; | |
2105 | ||
2106 | if (!(__supported_pte_mask & _PAGE_NX)) | |
2107 | goto out; | |
2108 | ||
2109 | if (!(page_flags & _PAGE_NX)) | |
2110 | cpa.mask_clr = __pgprot(_PAGE_NX); | |
2111 | ||
15f003d2 SP |
2112 | if (!(page_flags & _PAGE_RW)) |
2113 | cpa.mask_clr = __pgprot(_PAGE_RW); | |
2114 | ||
21729f81 TL |
2115 | if (!(page_flags & _PAGE_ENC)) |
2116 | cpa.mask_clr = pgprot_encrypted(cpa.mask_clr); | |
2117 | ||
82f0712c BP |
2118 | cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags); |
2119 | ||
2120 | retval = __change_page_attr_set_clr(&cpa, 0); | |
2121 | __flush_tlb_all(); | |
2122 | ||
2123 | out: | |
2124 | return retval; | |
2125 | } | |
2126 | ||
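/*
 * Illustrative sketch (editor's example, not part of the original file):
 * mapping a firmware region into a private page table, EFI-style. The
 * flags are additive: _PAGE_PRESENT is always set, while leaving a flag
 * out of page_flags makes the helper clear it (see the mask_clr logic
 * above).
 */
static inline int example_map_firmware(pgd_t *pgd, u64 pfn, unsigned long va,
				       unsigned int npages)
{
	/* writable, non-executable, encrypted mapping */
	return kernel_map_pages_in_pgd(pgd, pfn, va, npages,
				       _PAGE_RW | _PAGE_NX | _PAGE_ENC);
}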
d1028a15 AV |
2127 | /* |
2128 | * The testcases use internal knowledge of the implementation that shouldn't | |
2129 | * be exposed to the rest of the kernel. Include these directly here. | |
2130 | */ | |
2131 | #ifdef CONFIG_CPA_DEBUG | |
2132 | #include "pageattr-test.c" | |
2133 | #endif |