/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
	(void)reason;
}
#endif


int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
	u64 pat;
	bool boot_cpu = !boot_pat_state;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		} else {
			/*
			 * If this happens we are on a secondary CPU, but
			 * switched to PAT on the boot CPU. We have no way to
			 * undo PAT.
			 */
			printk(KERN_ERR "PAT enabled, "
			       "but not supported by secondary CPU\n");
			BUG();
		}
	}

	/* Set PWT to Write-Combining. All other bits stay the same. */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * The PAT bit is unused.
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);

	if (boot_cpu)
		printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
		       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT
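
/*
 * Illustrative sketch, not part of the original file: the PAT() macro above
 * packs one 3-bit memory-type code per byte of the 64-bit IA32_PAT MSR, so
 * entry N occupies bits N*8..N*8+2. A hypothetical helper to pull a single
 * entry back out of an MSR image:
 */
static inline u8 pat_msr_entry(u64 pat_msr, int entry)
{
	/* Eight entries, one byte each; only the low 3 bits are defined. */
	return (pat_msr >> (entry * 8)) & 0x7;
}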

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Intersect the PAT memory type with the MTRR memory type and return the
 * resulting memory type as PAT understands it. (The type values used by PAT
 * and by the MTRRs are not the same.) The intersection is based on the
 * "Effective Memory Type" tables in the IA-32 SDM, vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for an MTRR hint to get the effective type in case the
	 * PAT request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_UC_MINUS;

		return _PAGE_CACHE_WB;
	}

	return req_type;
}

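/*
 * Illustrative examples, not part of the original file: only WB requests
 * consult the MTRRs. A WB request over a range whose MTRR type is not
 * write-back degrades to UC-, while non-WB requests pass through:
 *
 *	pat_x_mtrr_type(start, end, _PAGE_CACHE_WB);	// WB or UC-
 *	pat_x_mtrr_type(start, end, _PAGE_CACHE_WC);	// always WC
 */
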
static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		/*
		 * For legacy reasons, the physical address range in the
		 * legacy ISA region is tracked as non-RAM. This allows users
		 * of /dev/mem to map portions of the legacy ISA region, even
		 * when some of those portions are listed (or not even listed)
		 * with different e820 types (RAM/reserved/...).
		 */
		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
		    page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}

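/*
 * Illustrative sketch, not part of the original file: callers below treat
 * pat_pagerange_is_ram()'s result as tri-state. A hypothetical wrapper that
 * only accepts homogeneous ranges would look like this:
 */
static int __maybe_unused example_range_is_pure_ram(unsigned long start,
						    unsigned long end)
{
	int is_ram = pat_pagerange_is_ram(start, end);

	if (is_ram < 0)
		return -EINVAL;	/* range straddles RAM and non-RAM */

	return is_ram;		/* 1: all RAM, 0: no RAM (or legacy ISA) */
}
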
/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * type. Here we do two passes:
 * - Find the memtype of all the pages in the range, looking for any conflicts.
 * - In case of no conflicts, set the new memtype for the pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		unsigned long type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			printk(KERN_INFO "reserve_ram_pages_type failed "
				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
				start, end, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}

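/*
 * Illustrative sketch, not part of the original file: the page-flag tracking
 * above is a reserve/free round trip. A RAM page starts out untracked
 * (get_page_memtype() == -1); reserve stamps the new type on every page and
 * free resets it:
 */
static int __maybe_unused example_ram_memtype_roundtrip(u64 start, u64 end)
{
	unsigned long new_type;
	int ret;

	ret = reserve_ram_pages_type(start, end, _PAGE_CACHE_WC, &new_type);
	if (ret)
		return ret;	/* some page already had a conflicting type */

	/* ... use the range with the type returned in new_type ... */

	return free_ram_pages_type(start, end);
}
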
/*
 * req_type typically has one of:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * If new_type is NULL, the function will return an error if it cannot
 * reserve the region with req_type. If new_type is non-NULL, the function
 * will return the available type in new_type in case of no error. In case
 * of any error it will return a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new;
	unsigned long actual_type;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == _PAGE_CACHE_WC)
				*new_type = _PAGE_CACHE_UC_MINUS;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = reserve_ram_pages_type(start, end, req_type, new_type);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start = start;
	new->end = end;
	new->type = actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
		       "track %s, req %s\n",
		       start, end, cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
		start, end, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = free_ram_pages_type(start, end);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	err = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
		       current->comm, current->pid, start, end);
	}

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}

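/*
 * Illustrative sketch, not part of the original file: reserve_memtype() and
 * free_memtype() pair up around use of a range. A caller asking for WC must
 * be prepared to get a different compatible type back:
 */
static int __maybe_unused example_reserve_then_free(u64 start, u64 size)
{
	unsigned long type;
	int ret;

	ret = reserve_memtype(start, start + size, _PAGE_CACHE_WC, &type);
	if (ret)
		return ret;

	/* ... map the range with "type", which may differ from the request ... */

	return free_memtype(start, start + size);
}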

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
 * _PAGE_CACHE_UC
 */
static unsigned long lookup_memtype(u64 paddr)
{
	int rettype = _PAGE_CACHE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;
		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = get_page_memtype(page);
		/*
		 * -1 from get_page_memtype() implies the RAM page is in its
		 * default state, not reserved, and hence of type WB.
		 */
		if (rettype == -1)
			rettype = _PAGE_CACHE_WB;

		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

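/*
 * Illustrative sketch, not part of the original file: lookup_memtype() must
 * only run with PAT enabled, so a hypothetical caller guards it the way
 * track_pfn_vma_new() below does:
 */
static unsigned long __maybe_unused example_guarded_lookup(u64 paddr)
{
	if (!pat_enabled)
		return _PAGE_CACHE_WB;	/* everything is WB without PAT */

	return lookup_memtype(paddr);
}
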
/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			unsigned long *type)
{
	resource_size_t size = end - start;
	unsigned long req_type = *type;
	unsigned long new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

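/*
 * Illustrative sketch, not part of the original file: io_reserve_memtype()
 * updates *type in place, so a hypothetical ioremap-style caller passes the
 * type it wants and then maps with whatever came back:
 */
static int __maybe_unused example_io_reserve(resource_size_t start,
					     resource_size_t size)
{
	unsigned long type = _PAGE_CACHE_WC;
	int ret;

	ret = io_reserve_memtype(start, start + size, &type);
	if (ret)
		return ret;

	/* ... ioremap the window using "type" as the page protection ... */

	io_free_memtype(start, start + size);
	return 0;
}
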
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
			       "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
			       current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot)
{
	unsigned long flags = _PAGE_CACHE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		flags = _PAGE_CACHE_UC_MINUS;

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
	unsigned long id_sz;

	if (base >= __pa(high_memory))
		return 0;

	id_sz = (__pa(high_memory) < base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
		printk(KERN_INFO
			"%s:%d ioremap_change_attr failed %s "
			"for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			base, (unsigned long long)(base + size));
		return -EINVAL;
	}
	return 0;
}

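/*
 * Worked example, not part of the original file (numbers made up): only the
 * slice of [base, base + size) below __pa(high_memory) has a kernel identity
 * mapping, so the attribute change above is clamped to it. With high_memory
 * at physical 896 MB, a 16 MB range starting at 890 MB only syncs its first
 * 6 MB, and a range starting at or above 896 MB is a no-op.
 */
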
/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only. After a successful reserve_memtype(), this
 * function also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
			     int strict_prot)
{
	int is_ram = 0;
	int ret;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
	unsigned long flags = want_flags;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of the number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of the first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled)
			return 0;

		flags = lookup_memtype(paddr);
		if (want_flags != flags) {
			printk(KERN_WARNING
			       "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					      (~_PAGE_CACHE_MASK)) |
					     flags);
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested
		 * in the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when a vma that covers a pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the
 * prot from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range with
 * a single reserve_pfn_range() call.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
		      unsigned long pfn, unsigned long size)
{
	unsigned long flags;
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot, 0);
	}

	if (!pat_enabled)
		return 0;

	/* for vm_insert_pfn and friends, we set prot based on lookup */
	flags = lookup_memtype(pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 flags);

	return 0;
}

/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
		     unsigned long size)
{
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		free_pfn_range(paddr, vma_size);
		return;
	}
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
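
/*
 * Illustrative sketch, not part of the original file: a hypothetical driver
 * mmap handler would apply pgprot_writecombine() to the vma prot before
 * inserting pages; the uncached fallback happens transparently when PAT is
 * off:
 */
static void __maybe_unused example_mmap_wc(struct vm_area_struct *vma)
{
	/* WC with PAT; degrades to an uncached mapping without it. */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
}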

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	if (pat_enabled) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);
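
/*
 * Illustrative note, not part of the original file: with debugfs mounted at
 * /sys/kernel/debug, the file created above can be read directly. Addresses
 * here are made up; the line format follows memtype_seq_show():
 *
 *	# cat /sys/kernel/debug/x86/pat_memtype_list
 *	PAT memtype list:
 *	uncached-minus @ 0xfed00000-0xfed01000
 *	write-combining @ 0xd0000000-0xd1000000
 */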

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */