Commit | Line | Data |
---|---|---|
2e5d9c85 | 1 | /* |
2 | * Handle caching attributes in page tables (PAT) | |
3 | * | |
4 | * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | |
5 | * Suresh B Siddha <suresh.b.siddha@intel.com> | |
6 | * | |
7 | * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen. | |
8 | */ | |
9 | ||
ad2cde16 IM | 10 | #include <linux/seq_file.h> |
11 | #include <linux/bootmem.h> | |
12 | #include <linux/debugfs.h> | |
2e5d9c85 | 13 | #include <linux/kernel.h> |
92b9af9e | 14 | #include <linux/module.h> |
2e5d9c85 | 15 | #include <linux/gfp.h> |
ad2cde16 | 16 | #include <linux/mm.h> |
2e5d9c85 | 17 | #include <linux/fs.h> |
335ef896 | 18 | #include <linux/rbtree.h> |
2e5d9c85 | 19 | |
ad2cde16 | 20 | #include <asm/cacheflush.h> |
2e5d9c85 | 21 | #include <asm/processor.h> |
ad2cde16 | 22 | #include <asm/tlbflush.h> |
fd12a0d6 | 23 | #include <asm/x86_init.h> |
2e5d9c85 | 24 | #include <asm/pgtable.h> |
2e5d9c85 | 25 | #include <asm/fcntl.h> |
ad2cde16 | 26 | #include <asm/e820.h> |
2e5d9c85 | 27 | #include <asm/mtrr.h> |
ad2cde16 IM | 28 | #include <asm/page.h> |
29 | #include <asm/msr.h> | |
30 | #include <asm/pat.h> | |
e7f260a2 | 31 | #include <asm/io.h> |
2e5d9c85 | 32 | |
8d4a4300 | 33 | #ifdef CONFIG_X86_PAT |
499f8f84 | 34 | int __read_mostly pat_enabled = 1; |
2e5d9c85 | 35 | |
1ee4bd92 | 36 | static inline void pat_disable(const char *reason) |
2e5d9c85 | 37 | { |
499f8f84 | 38 | pat_enabled = 0; |
8d4a4300 | 39 | printk(KERN_INFO "%s\n", reason); |
2e5d9c85 | 40 | } |
2e5d9c85 | 41 | |
be524fb9 | 42 | static int __init nopat(char *str) |
2e5d9c85 | 43 | { |
8d4a4300 | 44 | pat_disable("PAT support disabled."); |
2e5d9c85 | 45 | return 0; |
46 | } | |
8d4a4300 | 47 | early_param("nopat", nopat); |
75a04811 PA | 48 | #else |
49 | static inline void pat_disable(const char *reason) | |
50 | { | |
51 | (void)reason; | |
52 | } | |
8d4a4300 TG | 53 | #endif |
54 | ||
77b52b4c VP | 55 | |
56 | static int debug_enable; | |
ad2cde16 | 57 | |
77b52b4c VP | 58 | static int __init pat_debug_setup(char *str) |
59 | { | |
60 | debug_enable = 1; | |
61 | return 0; | |
62 | } | |
63 | __setup("debugpat", pat_debug_setup); | |
64 | ||
65 | #define dprintk(fmt, arg...) \ | |
66 | do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0) | |
67 | ||
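
The dprintk() macro above uses the classic `do { ... } while (0)` wrapper so that the guarded printk behaves as a single statement even as the body of an unbraced if/else. A minimal standalone userspace sketch of the same idiom (illustrative only, not part of this file; `debug_enable` is forced on for the demo):

```c
#include <stdio.h>

static int debug_enable = 1;	/* stands in for the "debugpat" boot flag */

/* Same shape as dprintk() above: the do/while (0) makes the macro safe
 * to use as the body of an if/else without braces. */
#define dprintk(fmt, arg...) \
	do { if (debug_enable) printf(fmt, ##arg); } while (0)

int main(void)
{
	if (debug_enable)
		dprintk("PAT debug on (%d)\n", debug_enable);
	else
		dprintk("never printed\n");
	return 0;
}
```
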
68 | ||
8d4a4300 | 69 | static u64 __read_mostly boot_pat_state; |
2e5d9c85 | 70 | |
71 | enum { | |
72 | PAT_UC = 0, /* uncached */ | |
73 | PAT_WC = 1, /* Write combining */ | |
74 | PAT_WT = 4, /* Write Through */ | |
75 | PAT_WP = 5, /* Write Protected */ | |
76 | PAT_WB = 6, /* Write Back (default) */ | |
77 | PAT_UC_MINUS = 7, /* UC, but can be overridden by MTRR */ |
78 | }; | |
79 | ||
cd7a4e93 | 80 | #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8)) |
2e5d9c85 | 81 | |
82 | void pat_init(void) | |
83 | { | |
84 | u64 pat; | |
e23a8b6a | 85 | bool boot_cpu = !boot_pat_state; |
2e5d9c85 | 86 | |
499f8f84 | 87 | if (!pat_enabled) |
2e5d9c85 | 88 | return; |
89 | ||
75a04811 PA | 90 | if (!cpu_has_pat) { |
91 | if (!boot_pat_state) { | |
92 | pat_disable("PAT not supported by CPU."); | |
93 | return; | |
94 | } else { | |
95 | /* | |
96 | * If this happens we are on a secondary CPU, but | |
97 | * switched to PAT on the boot CPU. We have no way to | |
98 | * undo PAT. | |
99 | */ | |
100 | printk(KERN_ERR "PAT enabled, " | |
101 | "but not supported by secondary CPU\n"); | |
102 | BUG(); | |
103 | } | |
8d4a4300 | 104 | } |
2e5d9c85 | 105 | |
106 | /* Set PWT to Write-Combining. All other bits stay the same */ | |
107 | /* | |
108 | * PTE encoding used in Linux: | |
109 | * PAT | |
110 | * |PCD | |
111 | * ||PWT | |
112 | * ||| | |
113 | * 000 WB _PAGE_CACHE_WB | |
114 | * 001 WC _PAGE_CACHE_WC | |
115 | * 010 UC- _PAGE_CACHE_UC_MINUS | |
116 | * 011 UC _PAGE_CACHE_UC | |
117 | * PAT bit unused | |
118 | */ | |
cd7a4e93 AH | 119 | pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) | |
120 | PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC); | |
2e5d9c85 | 121 | |
122 | /* Boot CPU check */ | |
8d4a4300 | 123 | if (!boot_pat_state) |
2e5d9c85 | 124 | rdmsrl(MSR_IA32_CR_PAT, boot_pat_state); |
2e5d9c85 | 125 | |
126 | wrmsrl(MSR_IA32_CR_PAT, pat); | |
e23a8b6a RD | 127 | |
128 | if (boot_cpu) | |
129 | printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n", | |
130 | smp_processor_id(), boot_pat_state, pat); | |
2e5d9c85 | 131 | } |
132 | ||
133 | #undef PAT | |
134 | ||
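
To make the `PAT(x, y)` encoding concrete, here is a small standalone userspace sketch (illustrative only; it copies the enum values and macro shape used by pat_init() above) that computes the same MSR value and decodes each 8-bit slot along with the PAT/PCD/PWT bits that select it:

```c
#include <stdio.h>
#include <stdint.h>

enum { PAT_UC = 0, PAT_WC = 1, PAT_WT = 4, PAT_WP = 5,
       PAT_WB = 6, PAT_UC_MINUS = 7 };
#define PAT(x, y) ((uint64_t)PAT_ ## y << ((x) * 8))

static const char *slot_name(uint8_t v)
{
	switch (v) {
	case PAT_UC:       return "UC";
	case PAT_WC:       return "WC";
	case PAT_WT:       return "WT";
	case PAT_WP:       return "WP";
	case PAT_WB:       return "WB";
	case PAT_UC_MINUS: return "UC-";
	default:           return "??";
	}
}

int main(void)
{
	/* Same value pat_init() writes to MSR_IA32_CR_PAT. */
	uint64_t pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		       PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
	int i;

	printf("PAT MSR = 0x%016llx\n", (unsigned long long)pat);
	for (i = 0; i < 8; i++)
		printf("slot %d (PAT=%d PCD=%d PWT=%d): %s\n",
		       i, (i >> 2) & 1, (i >> 1) & 1, i & 1,
		       slot_name((uint8_t)((pat >> (i * 8)) & 0xff)));
	return 0;
}
```
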
135 | static char *cattr_name(unsigned long flags) | |
136 | { | |
137 | switch (flags & _PAGE_CACHE_MASK) { | |
cd7a4e93 AH | 138 | case _PAGE_CACHE_UC: return "uncached"; |
139 | case _PAGE_CACHE_UC_MINUS: return "uncached-minus"; | |
140 | case _PAGE_CACHE_WB: return "write-back"; | |
141 | case _PAGE_CACHE_WC: return "write-combining"; | |
142 | default: return "broken"; | |
2e5d9c85 | 143 | } |
144 | } | |
145 | ||
146 | /* | |
147 | * The global memtype list keeps track of memory type for specific | |
148 | * physical memory areas. Conflicting memory types in different | |
149 | * mappings can cause CPU cache corruption. To avoid this we keep track. | |
150 | * | |
151 | * The list is sorted based on starting address and can contain multiple | |
152 | * entries for each address (this allows reference counting for overlapping | |
153 | * areas). All the aliases have the same cache attributes of course. | |
154 | * Zero attributes are represented as holes. | |
155 | * | |
335ef896 VP | 156 | * The data structure is a list that is also organized as an rbtree |
157 | * sorted on the start address of memtype range. | |
2e5d9c85 | 158 | * |
335ef896 | 159 | * memtype_lock protects both the linear list and rbtree. |
2e5d9c85 | 160 | */ |
161 | ||
162 | struct memtype { | |
ad2cde16 IM | 163 | u64 start; |
164 | u64 end; | |
165 | unsigned long type; | |
166 | struct list_head nd; | |
335ef896 | 167 | struct rb_node rb; |
2e5d9c85 | 168 | }; |
169 | ||
335ef896 | 170 | static struct rb_root memtype_rbroot = RB_ROOT; |
2e5d9c85 | 171 | static LIST_HEAD(memtype_list); |
ad2cde16 | 172 | static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */ |
2e5d9c85 | 173 | |
335ef896 VP | 174 | static struct memtype *memtype_rb_search(struct rb_root *root, u64 start) |
175 | { | |
176 | struct rb_node *node = root->rb_node; | |
177 | struct memtype *last_lower = NULL; | |
178 | ||
179 | while (node) { | |
180 | struct memtype *data = container_of(node, struct memtype, rb); | |
181 | ||
182 | if (data->start < start) { | |
183 | last_lower = data; | |
184 | node = node->rb_right; | |
185 | } else if (data->start > start) { | |
186 | node = node->rb_left; | |
187 | } else | |
188 | return data; | |
189 | } | |
190 | ||
191 | /* Will return NULL if there is no entry with its start <= start */ | |
192 | return last_lower; | |
193 | } | |
194 | ||
195 | static void memtype_rb_insert(struct rb_root *root, struct memtype *data) | |
196 | { | |
197 | struct rb_node **new = &(root->rb_node); | |
198 | struct rb_node *parent = NULL; | |
199 | ||
200 | while (*new) { | |
201 | struct memtype *this = container_of(*new, struct memtype, rb); | |
202 | ||
203 | parent = *new; | |
204 | if (data->start <= this->start) | |
205 | new = &((*new)->rb_left); | |
206 | else if (data->start > this->start) | |
207 | new = &((*new)->rb_right); | |
208 | } | |
209 | ||
210 | rb_link_node(&data->rb, parent, new); | |
211 | rb_insert_color(&data->rb, root); | |
212 | } | |
213 | ||
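
memtype_rb_search() above is a "floor" lookup: it returns the node with the largest start that is <= the query, or NULL if none exists. A minimal runnable userspace sketch of the same semantics, using binary search over a sorted array in place of the rbtree (struct and names here are illustrative, not the kernel's):

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct mt { uint64_t start, end; };

/* Floor search: largest entry with start <= q, or NULL. Mirrors the
 * last_lower bookkeeping in memtype_rb_search() above. */
static struct mt *floor_search(struct mt *v, size_t n, uint64_t q)
{
	struct mt *last_lower = NULL;
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (v[mid].start <= q) {
			last_lower = &v[mid];	/* candidate; look right */
			lo = mid + 1;
		} else {
			hi = mid;		/* too big; look left */
		}
	}
	return last_lower;
}

int main(void)
{
	struct mt v[] = { {0x1000, 0x2000}, {0x3000, 0x4000}, {0x8000, 0x9000} };
	struct mt *m = floor_search(v, 3, 0x3500);

	if (m)
		printf("floor(0x3500) -> [0x%llx-0x%llx)\n",
		       (unsigned long long)m->start, (unsigned long long)m->end);
	return 0;
}
```
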
2e5d9c85 | 214 | /* |
215 | * Intersects the PAT memory type with the MTRR memory type and returns |
216 | * the resulting effective memory type as PAT understands it. |
217 | * (The type encodings in PAT and MTRR do not have the same values.) |
218 | * The intersection is based on the "Effective Memory Type" tables in the |
219 | * IA-32 SDM, Vol. 3A. |
220 | */ | |
6cf514fc | 221 | static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type) |
2e5d9c85 | 222 | { |
c26421d0 VP | 223 | /* |
224 | * Look for an MTRR hint to get the effective type in the case where the |
225 | * PAT request is for WB. |
226 | */ | |
dd0c7c49 AH | 227 | if (req_type == _PAGE_CACHE_WB) { |
228 | u8 mtrr_type; | |
229 | ||
230 | mtrr_type = mtrr_type_lookup(start, end); | |
b6ff32d9 SS | 231 | if (mtrr_type != MTRR_TYPE_WRBACK) |
232 | return _PAGE_CACHE_UC_MINUS; | |
233 | ||
234 | return _PAGE_CACHE_WB; | |
dd0c7c49 AH | 235 | } |
236 | ||
237 | return req_type; | |
2e5d9c85 | 238 | } |
239 | ||
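
A tiny userspace sketch of the rule pat_x_mtrr_type() implements: a WB request is trusted only where the MTRR lookup also reports write-back, and is otherwise degraded to UC-. The cache-type constants and the stub MTRR map below are illustrative stand-ins, not the kernel's:

```c
#include <stdio.h>
#include <stdint.h>

enum { CACHE_WB, CACHE_WC, CACHE_UC_MINUS, CACHE_UC };	/* illustrative */
#define MTRR_TYPE_WRBACK 6	/* matches the x86 MTRR encoding */

/* Fake MTRR map: pretend only [0x0, 0x100000) is write-back RAM. */
static uint8_t mtrr_type_lookup(uint64_t start, uint64_t end)
{
	(void)end;
	return start < 0x100000 ? MTRR_TYPE_WRBACK : 0 /* uncachable */;
}

/* Same rule as above: only a WB request consults the MTRR hint. */
static int pat_x_mtrr_type(uint64_t start, uint64_t end, int req_type)
{
	if (req_type == CACHE_WB &&
	    mtrr_type_lookup(start, end) != MTRR_TYPE_WRBACK)
		return CACHE_UC_MINUS;
	return req_type;
}

int main(void)
{
	printf("WB @ 0x1000   -> %d (WB stays WB)\n",
	       pat_x_mtrr_type(0x1000, 0x2000, CACHE_WB));
	printf("WB @ 0x200000 -> %d (degraded to UC-)\n",
	       pat_x_mtrr_type(0x200000, 0x201000, CACHE_WB));
	return 0;
}
```
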
ad2cde16 IM | 240 | static int |
241 | chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type) | |
64fe44c3 AH | 242 | { |
243 | if (new->type != entry->type) { | |
244 | if (type) { | |
245 | new->type = entry->type; | |
246 | *type = entry->type; | |
247 | } else | |
248 | goto conflict; | |
249 | } | |
250 | ||
251 | /* check overlaps with more than one entry in the list */ | |
252 | list_for_each_entry_continue(entry, &memtype_list, nd) { | |
253 | if (new->end <= entry->start) | |
254 | break; | |
255 | else if (new->type != entry->type) | |
256 | goto conflict; | |
257 | } | |
258 | return 0; | |
259 | ||
260 | conflict: | |
261 | printk(KERN_INFO "%s:%d conflicting memory types " | |
262 | "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start, | |
263 | new->end, cattr_name(new->type), cattr_name(entry->type)); | |
264 | return -EBUSY; | |
265 | } | |
266 | ||
be03d9e8 SS | 267 | static int pat_pagerange_is_ram(unsigned long start, unsigned long end) |
268 | { | |
269 | int ram_page = 0, not_ram_page = 0; |
270 | unsigned long page_nr; | |
271 | ||
272 | for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT); | |
273 | ++page_nr) { | |
274 | /* | |
275 | * For legacy reasons, the physical address range in the legacy ISA |
276 | * region is tracked as non-RAM. This allows users of |
277 | * /dev/mem to map portions of the legacy ISA region, even when |
278 | * some of those portions are listed (or not even listed) with |
279 | * different e820 types (RAM/reserved/..). |
280 | */ | |
281 | if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) && | |
282 | page_is_ram(page_nr)) | |
283 | ram_page = 1; | |
284 | else | |
285 | not_ram_page = 1; |
286 | ||
287 | if (ram_page == not_ram_page) |
288 | return -1; | |
289 | } | |
290 | ||
291 | return ram_page; | |
292 | } | |
293 | ||
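
A runnable userspace sketch of the tri-state contract of pat_pagerange_is_ram(): 1 when every page in the range is RAM, 0 when none is, and -1 for a mix. A small static map stands in for the e820/page_is_ram() machinery, and the ISA carve-out is omitted for brevity:

```c
#include <stdio.h>

#define NPAGES 8

/* Fake memory map: pages 0-3 are RAM, 4-7 are not. */
static const int ram_map[NPAGES] = { 1, 1, 1, 1, 0, 0, 0, 0 };

/* 1 = all RAM, 0 = no RAM, -1 = mixed (same contract as above). */
static int pagerange_is_ram(int first, int last)
{
	int ram_page = 0, not_ram_page = 0, pg;

	for (pg = first; pg < last; pg++) {
		if (ram_map[pg])
			ram_page = 1;
		else
			not_ram_page = 1;
		/* Both flags set means the range straddles a boundary. */
		if (ram_page == not_ram_page)
			return -1;
	}
	return ram_page;
}

int main(void)
{
	printf("pages 0-3: %d\n", pagerange_is_ram(0, 4));	/* 1  */
	printf("pages 4-7: %d\n", pagerange_is_ram(4, 8));	/* 0  */
	printf("pages 2-5: %d\n", pagerange_is_ram(2, 6));	/* -1 */
	return 0;
}
```
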
9542ada8 | 294 | /* |
f5841740 VP | 295 | * For RAM pages, we use page flags to mark the pages with the appropriate type. |
296 | * Here we make two passes: |
297 | * - Find the memtype of all the pages in the range, look for any conflicts | |
298 | * - In case of no conflicts, set the new memtype for pages in the range | |
9542ada8 | 299 | * |
f5841740 | 300 | * Caller must hold memtype_lock for atomicity. |
9542ada8 SS | 301 | */ |
302 | static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type, | |
ad2cde16 | 303 | unsigned long *new_type) |
9542ada8 SS | 304 | { |
305 | struct page *page; | |
f5841740 VP | 306 | u64 pfn; |
307 | ||
308 | if (req_type == _PAGE_CACHE_UC) { | |
309 | /* We do not support strong UC */ | |
310 | WARN_ON_ONCE(1); | |
311 | req_type = _PAGE_CACHE_UC_MINUS; | |
312 | } | |
9542ada8 SS | 313 | |
314 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { | |
f5841740 | 315 | unsigned long type; |
9542ada8 | 316 | |
f5841740 VP | 317 | page = pfn_to_page(pfn); |
318 | type = get_page_memtype(page); | |
319 | if (type != -1) { | |
320 | printk(KERN_INFO "reserve_ram_pages_type failed " | |
321 | "0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n", | |
322 | start, end, type, req_type); | |
323 | if (new_type) | |
324 | *new_type = type; | |
325 | ||
326 | return -EBUSY; | |
327 | } | |
9542ada8 | 328 | } |
9542ada8 | 329 | |
f5841740 VP | 330 | if (new_type) |
331 | *new_type = req_type; | |
332 | ||
333 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { | |
9542ada8 | 334 | page = pfn_to_page(pfn); |
f5841740 | 335 | set_page_memtype(page, req_type); |
9542ada8 | 336 | } |
f5841740 | 337 | return 0; |
9542ada8 SS | 338 | } |
339 | ||
340 | static int free_ram_pages_type(u64 start, u64 end) | |
341 | { | |
342 | struct page *page; | |
f5841740 | 343 | u64 pfn; |
9542ada8 SS | 344 | |
345 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { | |
346 | page = pfn_to_page(pfn); | |
f5841740 | 347 | set_page_memtype(page, -1); |
9542ada8 SS | 348 | } |
349 | return 0; | |
9542ada8 SS | 350 | } |
351 | ||
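
reserve_ram_pages_type() above is a classic two-pass check-then-commit update: pass one verifies that no page already carries a conflicting type, and only then does pass two set the new type, so a failure leaves no partial state behind. A runnable userspace sketch of the pattern (the array stands in for per-page flags; -1 marks "untracked", as with get_page_memtype() above):

```c
#include <stdio.h>

#define NPAGES 4

static long page_type[NPAGES] = { -1, -1, -1, -1 };	/* -1 == untracked */

/* Two passes: check everything first, only then mutate. */
static int reserve_pages_type(int first, int last, long req_type)
{
	int pg;

	for (pg = first; pg < last; pg++)
		if (page_type[pg] != -1)
			return -1;	/* conflict: nothing was changed */

	for (pg = first; pg < last; pg++)
		page_type[pg] = req_type;
	return 0;
}

int main(void)
{
	printf("first reserve:  %d\n", reserve_pages_type(0, 4, 1));	/* 0  */
	printf("second reserve: %d\n", reserve_pages_type(2, 4, 2));	/* -1 */
	return 0;
}
```
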
fd12a0d6 JS | 352 | int default_is_untracked_pat_range(u64 start, u64 end) |
353 | { | |
354 | return is_ISA_range(start, end); | |
355 | } | |
356 | ||
e7f260a2 | 357 | /* |
358 | * req_type typically has one of the following: |
359 | * - _PAGE_CACHE_WB | |
360 | * - _PAGE_CACHE_WC | |
361 | * - _PAGE_CACHE_UC_MINUS | |
362 | * - _PAGE_CACHE_UC | |
363 | * | |
364 | * req_type will have the special-case value '-1' when the requester wants to |
365 | * inherit the memory type from the mtrr (if WB) or existing PAT, defaulting to UC_MINUS. |
366 | * | |
ac97991e AH | 367 | * If new_type is NULL, the function returns an error if it cannot reserve the |
368 | * region with req_type. If new_type is non-NULL, the function returns the |
369 | * available type in new_type when there is no error. On any error |
e7f260a2 | 370 | * it returns a negative value. |
371 | */ | |
2e5d9c85 | 372 | int reserve_memtype(u64 start, u64 end, unsigned long req_type, |
ad2cde16 | 373 | unsigned long *new_type) |
2e5d9c85 | 374 | { |
ac97991e | 375 | struct memtype *new, *entry; |
2e5d9c85 | 376 | unsigned long actual_type; |
f6887264 | 377 | struct list_head *where; |
9542ada8 | 378 | int is_range_ram; |
ad2cde16 | 379 | int err = 0; |
2e5d9c85 | 380 | |
ad2cde16 | 381 | BUG_ON(start >= end); /* end is exclusive */ |
69e26be9 | 382 | |
499f8f84 | 383 | if (!pat_enabled) { |
e7f260a2 | 384 | /* This is identical to page table setting without PAT */ |
ac97991e AH | 385 | if (new_type) { |
386 | if (req_type == -1) | |
387 | *new_type = _PAGE_CACHE_WB; | |
5fc51746 VP | 388 | else if (req_type == _PAGE_CACHE_WC) |
389 | *new_type = _PAGE_CACHE_UC_MINUS; | |
ac97991e AH | 390 | else |
391 | *new_type = req_type & _PAGE_CACHE_MASK; | |
e7f260a2 | 392 | } |
2e5d9c85 | 393 | return 0; |
394 | } | |
395 | ||
396 | /* Low ISA region is always mapped WB in the page table. No need to track */ |
fd12a0d6 | 397 | if (x86_platform.is_untracked_pat_range(start, end - 1)) { |
ac97991e AH | 398 | if (new_type) |
399 | *new_type = _PAGE_CACHE_WB; | |
2e5d9c85 | 400 | return 0; |
401 | } | |
402 | ||
b6ff32d9 SS | 403 | /* |
404 | * Call mtrr_lookup to get the type hint. This is an | |
405 | * optimization for /dev/mem mmap'ers into WB memory (BIOS | |
406 | * tools and ACPI tools). Use WB request for WB memory and use | |
407 | * UC_MINUS otherwise. | |
408 | */ | |
409 | actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK); | |
2e5d9c85 | 410 | |
95971342 SS | 411 | if (new_type) |
412 | *new_type = actual_type; | |
413 | ||
be03d9e8 | 414 | is_range_ram = pat_pagerange_is_ram(start, end); |
f5841740 VP | 415 | if (is_range_ram == 1) { |
416 | ||
417 | spin_lock(&memtype_lock); | |
418 | err = reserve_ram_pages_type(start, end, req_type, new_type); | |
419 | spin_unlock(&memtype_lock); | |
420 | ||
421 | return err; | |
422 | } else if (is_range_ram < 0) { | |
9542ada8 | 423 | return -EINVAL; |
f5841740 | 424 | } |
9542ada8 | 425 | |
ac97991e AH | 426 | new = kmalloc(sizeof(struct memtype), GFP_KERNEL); |
427 | if (!new) | |
2e5d9c85 | 428 | return -ENOMEM; |
429 | ||
ad2cde16 IM | 430 | new->start = start; |
431 | new->end = end; | |
432 | new->type = actual_type; | |
2e5d9c85 | 433 | |
2e5d9c85 | 434 | spin_lock(&memtype_lock); |
435 | ||
436 | /* Search for existing mapping that overlaps the current range */ | |
f6887264 | 437 | where = NULL; |
dcb73bf4 | 438 | list_for_each_entry(entry, &memtype_list, nd) { |
33af9039 | 439 | if (end <= entry->start) { |
f6887264 | 440 | where = entry->nd.prev; |
2e5d9c85 | 441 | break; |
33af9039 | 442 | } else if (start <= entry->start) { /* end > entry->start */ |
64fe44c3 | 443 | err = chk_conflict(new, entry, new_type); |
33af9039 AH | 444 | if (!err) { |
445 | dprintk("Overlap at 0x%Lx-0x%Lx\n", | |
446 | entry->start, entry->end); | |
447 | where = entry->nd.prev; | |
2e5d9c85 | 448 | } |
2e5d9c85 | 449 | break; |
33af9039 | 450 | } else if (start < entry->end) { /* start > entry->start */ |
64fe44c3 | 451 | err = chk_conflict(new, entry, new_type); |
33af9039 AH | 452 | if (!err) { |
453 | dprintk("Overlap at 0x%Lx-0x%Lx\n", | |
454 | entry->start, entry->end); | |
80c5e73d VP | 455 | |
456 | /* | |
457 | * Move to right position in the linked | |
458 | * list to add this new entry | |
459 | */ | |
460 | list_for_each_entry_continue(entry, | |
461 | &memtype_list, nd) { | |
462 | if (start <= entry->start) { | |
463 | where = entry->nd.prev; | |
464 | break; | |
465 | } | |
466 | } | |
2e5d9c85 | 467 | } |
2e5d9c85 | 468 | break; |
469 | } | |
470 | } | |
471 | ||
472 | if (err) { | |
3e9c83b3 AH | 473 | printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, " |
474 | "track %s, req %s\n", | |
475 | start, end, cattr_name(new->type), cattr_name(req_type)); | |
ac97991e | 476 | kfree(new); |
2e5d9c85 | 477 | spin_unlock(&memtype_lock); |
ad2cde16 | 478 | |
2e5d9c85 | 479 | return err; |
480 | } | |
481 | ||
f6887264 AH | 482 | if (where) |
483 | list_add(&new->nd, where); | |
484 | else | |
ac97991e | 485 | list_add_tail(&new->nd, &memtype_list); |
6997ab49 | 486 | |
335ef896 VP | 487 | memtype_rb_insert(&memtype_rbroot, new); |
488 | ||
2e5d9c85 | 489 | spin_unlock(&memtype_lock); |
3e9c83b3 AH | 490 | |
491 | dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n", | |
492 | start, end, cattr_name(new->type), cattr_name(req_type), | |
493 | new_type ? cattr_name(*new_type) : "-"); | |
494 | ||
2e5d9c85 | 495 | return err; |
496 | } | |
497 | ||
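
Callers are expected to pair reserve_memtype() with free_memtype() and to cope with (or back out of) a downgraded type. A sketch of the typical calling pattern, written against the signatures above; kernel context is assumed, this is not a standalone program, and example_wc_reserve is a hypothetical helper, not part of this file:

```c
/* Hypothetical caller sketch (kernel context assumed): request WC and
 * back out if PAT hands back something else. */
static int example_wc_reserve(u64 base, u64 size)
{
	unsigned long got;
	int ret;

	ret = reserve_memtype(base, base + size, _PAGE_CACHE_WC, &got);
	if (ret)
		return ret;		/* range conflicts or is invalid */

	if (got != _PAGE_CACHE_WC) {	/* e.g. UC- after an MTRR downgrade */
		free_memtype(base, base + size);
		return -EINVAL;
	}
	return 0;			/* pair with free_memtype() on teardown */
}
```
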
498 | int free_memtype(u64 start, u64 end) | |
499 | { | |
335ef896 | 500 | struct memtype *entry, *saved_entry; |
2e5d9c85 | 501 | int err = -EINVAL; |
9542ada8 | 502 | int is_range_ram; |
2e5d9c85 | 503 | |
69e26be9 | 504 | if (!pat_enabled) |
2e5d9c85 | 505 | return 0; |
2e5d9c85 | 506 | |
507 | /* Low ISA region is always mapped WB. No need to track */ | |
fd12a0d6 | 508 | if (x86_platform.is_untracked_pat_range(start, end - 1)) |
2e5d9c85 | 509 | return 0; |
2e5d9c85 | 510 | |
be03d9e8 | 511 | is_range_ram = pat_pagerange_is_ram(start, end); |
f5841740 VP | 512 | if (is_range_ram == 1) { |
513 | ||
514 | spin_lock(&memtype_lock); | |
515 | err = free_ram_pages_type(start, end); | |
516 | spin_unlock(&memtype_lock); | |
517 | ||
518 | return err; | |
519 | } else if (is_range_ram < 0) { | |
9542ada8 | 520 | return -EINVAL; |
f5841740 | 521 | } |
9542ada8 | 522 | |
2e5d9c85 | 523 | spin_lock(&memtype_lock); |
335ef896 VP | 524 | |
525 | entry = memtype_rb_search(&memtype_rbroot, start); | |
526 | if (unlikely(entry == NULL)) | |
527 | goto unlock_ret; | |
528 | ||
529 | /* | |
530 | * The saved entry points to an entry whose start is the same as or less |
531 | * than what we searched for. Now go through the list in both directions |
532 | * to look for the entry that matches both start and end; the list is |
533 | * sorted by start address. |
534 | */ | |
535 | saved_entry = entry; | |
dcb73bf4 | 536 | list_for_each_entry_from(entry, &memtype_list, nd) { |
ac97991e | 537 | if (entry->start == start && entry->end == end) { |
335ef896 VP | 538 | rb_erase(&entry->rb, &memtype_rbroot); |
539 | list_del(&entry->nd); | |
540 | kfree(entry); | |
541 | err = 0; | |
542 | break; | |
543 | } else if (entry->start > start) { | |
544 | break; | |
545 | } | |
546 | } | |
547 | ||
548 | if (!err) | |
549 | goto unlock_ret; | |
80c5e73d | 550 | |
335ef896 VP | 551 | entry = saved_entry; |
552 | list_for_each_entry_reverse(entry, &memtype_list, nd) { | |
553 | if (entry->start == start && entry->end == end) { | |
554 | rb_erase(&entry->rb, &memtype_rbroot); | |
ac97991e AH | 555 | list_del(&entry->nd); |
556 | kfree(entry); | |
2e5d9c85 | 557 | err = 0; |
558 | break; | |
335ef896 VP | 559 | } else if (entry->start < start) { |
560 | break; | |
2e5d9c85 | 561 | } |
562 | } | |
335ef896 | 563 | unlock_ret: |
2e5d9c85 | 564 | spin_unlock(&memtype_lock); |
565 | ||
566 | if (err) { | |
28eb559b | 567 | printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n", |
2e5d9c85 | 568 | current->comm, current->pid, start, end); |
569 | } | |
6997ab49 | 570 | |
77b52b4c | 571 | dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end); |
ad2cde16 | 572 | |
2e5d9c85 | 573 | return err; |
574 | } | |
575 | ||
f0970c13 | 576 | |
637b86e7 VP | 577 | /** |
578 | * lookup_memtype - Looks up the memory type for a physical address |
579 | * @paddr: physical address whose memory type needs to be looked up |
580 | * | |
581 | * Only to be called when PAT is enabled | |
582 | * | |
583 | * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or | |
584 | * _PAGE_CACHE_UC | |
585 | */ | |
586 | static unsigned long lookup_memtype(u64 paddr) | |
587 | { | |
588 | int rettype = _PAGE_CACHE_WB; | |
589 | struct memtype *entry; | |
590 | ||
fd12a0d6 | 591 | if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE - 1)) |
637b86e7 VP | 592 | return rettype; |
593 | ||
594 | if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) { | |
595 | struct page *page; | |
596 | spin_lock(&memtype_lock); | |
597 | page = pfn_to_page(paddr >> PAGE_SHIFT); | |
598 | rettype = get_page_memtype(page); | |
599 | spin_unlock(&memtype_lock); | |
600 | /* | |
601 | * -1 from get_page_memtype() implies RAM page is in its | |
602 | * default state and not reserved, and hence of type WB | |
603 | */ | |
604 | if (rettype == -1) | |
605 | rettype = _PAGE_CACHE_WB; | |
606 | ||
607 | return rettype; | |
608 | } | |
609 | ||
610 | spin_lock(&memtype_lock); | |
611 | ||
612 | entry = memtype_rb_search(&memtype_rbroot, paddr); | |
613 | if (entry != NULL) | |
614 | rettype = entry->type; | |
615 | else | |
616 | rettype = _PAGE_CACHE_UC_MINUS; | |
617 | ||
618 | spin_unlock(&memtype_lock); | |
619 | return rettype; | |
620 | } | |
621 | ||
9fd126bc VP | 622 | /** |
623 | * io_reserve_memtype - Request a memory type mapping for a region of memory | |
624 | * @start: start (physical address) of the region | |
625 | * @end: end (physical address) of the region | |
626 | * @type: A pointer to memtype, with requested type. On success, requested | |
627 | * or any other compatible type that was available for the region is returned | |
628 | * | |
629 | * On success, returns 0 | |
630 | * On failure, returns non-zero | |
631 | */ | |
632 | int io_reserve_memtype(resource_size_t start, resource_size_t end, | |
633 | unsigned long *type) | |
634 | { | |
b855192c | 635 | resource_size_t size = end - start; |
9fd126bc VP | 636 | unsigned long req_type = *type; |
637 | unsigned long new_type; | |
638 | int ret; | |
639 | ||
b855192c | 640 | WARN_ON_ONCE(iomem_map_sanity_check(start, size)); |
9fd126bc VP | 641 | |
642 | ret = reserve_memtype(start, end, req_type, &new_type); | |
643 | if (ret) | |
644 | goto out_err; | |
645 | ||
b855192c | 646 | if (!is_new_memtype_allowed(start, size, req_type, new_type)) |
9fd126bc VP | 647 | goto out_free; |
648 | ||
b855192c | 649 | if (kernel_map_sync_memtype(start, size, new_type) < 0) |
9fd126bc VP | 650 | goto out_free; |
651 | ||
652 | *type = new_type; | |
653 | return 0; | |
654 | ||
655 | out_free: | |
656 | free_memtype(start, end); | |
657 | ret = -EBUSY; | |
658 | out_err: | |
659 | return ret; | |
660 | } | |
661 | ||
662 | /** | |
663 | * io_free_memtype - Release a memory type mapping for a region of memory | |
664 | * @start: start (physical address) of the region | |
665 | * @end: end (physical address) of the region | |
666 | */ | |
667 | void io_free_memtype(resource_size_t start, resource_size_t end) | |
668 | { | |
669 | free_memtype(start, end); | |
670 | } | |
671 | ||
f0970c13 | 672 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
673 | unsigned long size, pgprot_t vma_prot) | |
674 | { | |
675 | return vma_prot; | |
676 | } | |
677 | ||
d092633b IM | 678 | #ifdef CONFIG_STRICT_DEVMEM |
679 | /* This check is done in drivers/char/mem.c in the case of STRICT_DEVMEM */ |
0124cecf VP | 680 | static inline int range_is_allowed(unsigned long pfn, unsigned long size) |
681 | { | |
682 | return 1; | |
683 | } | |
684 | #else | |
9e41bff2 | 685 | /* This check is needed to avoid cache aliasing when PAT is enabled */ |
0124cecf VP | 686 | static inline int range_is_allowed(unsigned long pfn, unsigned long size) |
687 | { | |
688 | u64 from = ((u64)pfn) << PAGE_SHIFT; | |
689 | u64 to = from + size; | |
690 | u64 cursor = from; | |
691 | ||
9e41bff2 RT | 692 | if (!pat_enabled) |
693 | return 1; | |
694 | ||
0124cecf VP | 695 | while (cursor < to) { |
696 | if (!devmem_is_allowed(pfn)) { | |
697 | printk(KERN_INFO | |
698 | "Program %s tried to access /dev/mem between %Lx->%Lx.\n", | |
699 | current->comm, from, to); | |
700 | return 0; | |
701 | } | |
702 | cursor += PAGE_SIZE; | |
703 | pfn++; | |
704 | } | |
705 | return 1; | |
706 | } | |
d092633b | 707 | #endif /* CONFIG_STRICT_DEVMEM */ |
0124cecf | 708 | |
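
The page walk in range_is_allowed() above generalizes to any per-page predicate over a byte range. A runnable userspace sketch of the same cursor loop, with devmem_is_allowed() faked by a local policy function (all names and the 16-page limit are illustrative):

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Fake policy: pretend only the first 16 pages are allowed. */
static int devmem_is_allowed(unsigned long pfn)
{
	return pfn < 16;
}

/* Same cursor loop as range_is_allowed(): reject the whole range on
 * the first disallowed page. */
static int range_is_allowed(unsigned long pfn, unsigned long size)
{
	uint64_t from = (uint64_t)pfn << PAGE_SHIFT;
	uint64_t to = from + size;
	uint64_t cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}

int main(void)
{
	printf("pfn 0,  4 pages: %d\n", range_is_allowed(0, 4 * PAGE_SIZE));	/* 1 */
	printf("pfn 14, 4 pages: %d\n", range_is_allowed(14, 4 * PAGE_SIZE));	/* 0 */
	return 0;
}
```
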
f0970c13 | 709 | int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, |
710 | unsigned long size, pgprot_t *vma_prot) | |
711 | { | |
0c3c8a18 | 712 | unsigned long flags = _PAGE_CACHE_WB; |
f0970c13 | 713 | |
0124cecf VP | 714 | if (!range_is_allowed(pfn, size)) |
715 | return 0; | |
716 | ||
f0970c13 | 717 | if (file->f_flags & O_SYNC) { |
28df82eb | 718 | flags = _PAGE_CACHE_UC_MINUS; |
f0970c13 | 719 | } |
720 | ||
721 | #ifdef CONFIG_X86_32 | |
722 | /* | |
723 | * On the PPro and successors, the MTRRs are used to set | |
724 | * memory types for physical addresses outside main memory, | |
725 | * so blindly setting UC or PWT on those pages is wrong. | |
726 | * For Pentiums and earlier, the surround logic should disable | |
727 | * caching for the high addresses through the KEN pin, but | |
728 | * we maintain the tradition of paranoia in this code. | |
729 | */ | |
499f8f84 | 730 | if (!pat_enabled && |
cd7a4e93 AH | 731 | !(boot_cpu_has(X86_FEATURE_MTRR) || |
732 | boot_cpu_has(X86_FEATURE_K6_MTRR) || | |
733 | boot_cpu_has(X86_FEATURE_CYRIX_ARR) || | |
734 | boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) && | |
735 | (pfn << PAGE_SHIFT) >= __pa(high_memory)) { | |
e7f260a2 | 736 | flags = _PAGE_CACHE_UC; |
f0970c13 | 737 | } |
738 | #endif | |
739 | ||
e7f260a2 | 740 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | |
741 | flags); | |
f0970c13 | 742 | return 1; |
743 | } | |
e7f260a2 | 744 | |
7880f746 VP | 745 | /* |
746 | * Change the memory type for the physical address range in the kernel identity |
747 | * mapping space if that range is part of the identity map. |
748 | */ | |
749 | int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) | |
750 | { | |
751 | unsigned long id_sz; | |
752 | ||
5fc51746 | 753 | if (base >= __pa(high_memory)) |
7880f746 VP | 754 | return 0; |
755 | ||
756 | id_sz = (__pa(high_memory) < base + size) ? | |
757 | __pa(high_memory) - base : | |
758 | size; | |
759 | ||
760 | if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) { | |
761 | printk(KERN_INFO | |
762 | "%s:%d ioremap_change_attr failed %s " | |
763 | "for %Lx-%Lx\n", | |
764 | current->comm, current->pid, | |
765 | cattr_name(flags), | |
766 | base, (unsigned long long)(base + size)); | |
767 | return -EINVAL; | |
768 | } | |
769 | return 0; | |
770 | } | |
771 | ||
5899329b | 772 | /* |
773 | * Internal interface to reserve a range of physical memory with prot. | |
774 | * Reserves non-RAM regions only; after a successful reserve_memtype, |
775 | * this func also keeps the identity mapping (if any) in sync with this new prot. |
776 | */ | |
cdecff68 | 777 | static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, |
778 | int strict_prot) | |
5899329b | 779 | { |
780 | int is_ram = 0; | |
7880f746 | 781 | int ret; |
cdecff68 | 782 | unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); |
0c3c8a18 | 783 | unsigned long flags = want_flags; |
5899329b | 784 | |
be03d9e8 | 785 | is_ram = pat_pagerange_is_ram(paddr, paddr + size); |
5899329b | 786 | |
be03d9e8 | 787 | /* |
d886c73c VP | 788 | * reserve_pfn_range() for RAM pages. We do not refcount to keep |
789 | * track of the number of mappings of RAM pages. We can assert that |
790 | * the type requested matches the type of the first page in the range. |
be03d9e8 | 791 | */ |
d886c73c VP | 792 | if (is_ram) { |
793 | if (!pat_enabled) | |
794 | return 0; | |
795 | ||
796 | flags = lookup_memtype(paddr); | |
797 | if (want_flags != flags) { | |
798 | printk(KERN_WARNING | |
799 | "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n", | |
800 | current->comm, current->pid, | |
801 | cattr_name(want_flags), | |
802 | (unsigned long long)paddr, | |
803 | (unsigned long long)(paddr + size), | |
804 | cattr_name(flags)); | |
805 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & | |
806 | (~_PAGE_CACHE_MASK)) | | |
807 | flags); | |
808 | } | |
4bb9c5c0 | 809 | return 0; |
d886c73c | 810 | } |
5899329b | 811 | |
812 | ret = reserve_memtype(paddr, paddr + size, want_flags, &flags); | |
813 | if (ret) | |
814 | return ret; | |
815 | ||
816 | if (flags != want_flags) { | |
1adcaafe SS | 817 | if (strict_prot || |
818 | !is_new_memtype_allowed(paddr, size, want_flags, flags)) { | |
cdecff68 | 819 | free_memtype(paddr, paddr + size); |
820 | printk(KERN_ERR "%s:%d map pfn expected mapping type %s" | |
821 | " for %Lx-%Lx, got %s\n", | |
822 | current->comm, current->pid, | |
823 | cattr_name(want_flags), | |
824 | (unsigned long long)paddr, | |
825 | (unsigned long long)(paddr + size), | |
826 | cattr_name(flags)); | |
827 | return -EINVAL; | |
828 | } | |
829 | /* | |
830 | * We allow returning a different type than the one requested in |
831 | * the non-strict case. |
832 | */ | |
833 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & | |
834 | (~_PAGE_CACHE_MASK)) | | |
835 | flags); | |
5899329b | 836 | } |
837 | ||
7880f746 | 838 | if (kernel_map_sync_memtype(paddr, size, flags) < 0) { |
5899329b | 839 | free_memtype(paddr, paddr + size); |
5899329b | 840 | return -EINVAL; |
841 | } | |
842 | return 0; | |
843 | } | |
844 | ||
845 | /* | |
846 | * Internal interface to free a range of physical memory. | |
847 | * Frees non RAM regions only. | |
848 | */ | |
849 | static void free_pfn_range(u64 paddr, unsigned long size) | |
850 | { | |
851 | int is_ram; | |
852 | ||
be03d9e8 | 853 | is_ram = pat_pagerange_is_ram(paddr, paddr + size); |
5899329b | 854 | if (is_ram == 0) |
855 | free_memtype(paddr, paddr + size); | |
856 | } | |
857 | ||
858 | /* | |
859 | * track_pfn_vma_copy is called when a vma covering the pfnmap gets |
860 | * copied through copy_page_range(). |
861 | * |
862 | * If the vma has a linear pfn mapping for the entire range, we get the prot |
863 | * from the pte and reserve the entire vma range with a single reserve_pfn_range call. |
5899329b | 864 | */ |
865 | int track_pfn_vma_copy(struct vm_area_struct *vma) | |
866 | { | |
c1c15b65 | 867 | resource_size_t paddr; |
982d789a | 868 | unsigned long prot; |
4b065046 | 869 | unsigned long vma_size = vma->vm_end - vma->vm_start; |
cdecff68 | 870 | pgprot_t pgprot; |
5899329b | 871 | |
5899329b | 872 | if (is_linear_pfn_mapping(vma)) { |
873 | /* | |
982d789a | 874 | * reserve the whole chunk covered by vma. We need the |
875 | * starting address and protection from pte. | |
5899329b | 876 | */ |
4b065046 | 877 | if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { |
5899329b | 878 | WARN_ON_ONCE(1); |
982d789a | 879 | return -EINVAL; |
5899329b | 880 | } |
cdecff68 | 881 | pgprot = __pgprot(prot); |
882 | return reserve_pfn_range(paddr, vma_size, &pgprot, 1); | |
5899329b | 883 | } |
884 | ||
5899329b | 885 | return 0; |
5899329b | 886 | } |
887 | ||
888 | /* | |
889 | * track_pfn_vma_new is called when a _new_ pfn mapping is being established |
890 | * for the physical range indicated by pfn and size. |
891 | * |
892 | * prot is passed in as a parameter for the new mapping. If the vma has a |
893 | * linear pfn mapping for the entire range, reserve the entire vma range with a |
894 | * single reserve_pfn_range call. |
5899329b | 895 | */ |
e4b866ed | 896 | int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, |
5899329b | 897 | unsigned long pfn, unsigned long size) |
898 | { | |
10876376 | 899 | unsigned long flags; |
c1c15b65 | 900 | resource_size_t paddr; |
4b065046 | 901 | unsigned long vma_size = vma->vm_end - vma->vm_start; |
5899329b | 902 | |
5899329b | 903 | if (is_linear_pfn_mapping(vma)) { |
904 | /* reserve the whole chunk starting from vm_pgoff */ | |
c1c15b65 | 905 | paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; |
cdecff68 | 906 | return reserve_pfn_range(paddr, vma_size, prot, 0); |
5899329b | 907 | } |
908 | ||
10876376 VP | 909 | if (!pat_enabled) |
910 | return 0; | |
911 | ||
912 | /* for vm_insert_pfn and friends, we set prot based on lookup */ | |
913 | flags = lookup_memtype(pfn << PAGE_SHIFT); | |
914 | *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | | |
915 | flags); | |
916 | ||
5899329b | 917 | return 0; |
5899329b | 918 | } |
919 | ||
920 | /* | |
921 | * untrack_pfn_vma is called while unmapping a pfnmap for a region. | |
922 | * untrack can be called for a specific region indicated by pfn and size or | |
923 | * can be for the entire vma (in which case size can be zero). | |
924 | */ | |
925 | void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, | |
926 | unsigned long size) | |
927 | { | |
c1c15b65 | 928 | resource_size_t paddr; |
4b065046 | 929 | unsigned long vma_size = vma->vm_end - vma->vm_start; |
5899329b | 930 | |
5899329b | 931 | if (is_linear_pfn_mapping(vma)) { |
932 | /* free the whole chunk starting from vm_pgoff */ | |
c1c15b65 | 933 | paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; |
5899329b | 934 | free_pfn_range(paddr, vma_size); |
935 | return; | |
936 | } | |
5899329b | 937 | } |
938 | ||
2520bd31 | 939 | pgprot_t pgprot_writecombine(pgprot_t prot) |
940 | { | |
941 | if (pat_enabled) | |
942 | return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC); | |
943 | else | |
944 | return pgprot_noncached(prot); | |
945 | } | |
92b9af9e | 946 | EXPORT_SYMBOL_GPL(pgprot_writecombine); |
2520bd31 | 947 | |
012f09e7 | 948 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) |
fec0962e | 949 | |
950 | /* get Nth element of the linked list */ | |
951 | static struct memtype *memtype_get_idx(loff_t pos) | |
952 | { | |
953 | struct memtype *list_node, *print_entry; | |
954 | int i = 1; | |
955 | ||
956 | print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL); | |
957 | if (!print_entry) | |
958 | return NULL; | |
959 | ||
960 | spin_lock(&memtype_lock); | |
961 | list_for_each_entry(list_node, &memtype_list, nd) { | |
962 | if (pos == i) { | |
963 | *print_entry = *list_node; | |
964 | spin_unlock(&memtype_lock); | |
965 | return print_entry; | |
966 | } | |
967 | ++i; | |
968 | } | |
969 | spin_unlock(&memtype_lock); | |
970 | kfree(print_entry); | |
ad2cde16 | 971 | |
fec0962e | 972 | return NULL; |
973 | } | |
974 | ||
975 | static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) | |
976 | { | |
977 | if (*pos == 0) { | |
978 | ++*pos; | |
979 | seq_printf(seq, "PAT memtype list:\n"); | |
980 | } | |
981 | ||
982 | return memtype_get_idx(*pos); | |
983 | } | |
984 | ||
985 | static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
986 | { | |
987 | ++*pos; | |
988 | return memtype_get_idx(*pos); | |
989 | } | |
990 | ||
991 | static void memtype_seq_stop(struct seq_file *seq, void *v) | |
992 | { | |
993 | } | |
994 | ||
995 | static int memtype_seq_show(struct seq_file *seq, void *v) | |
996 | { | |
997 | struct memtype *print_entry = (struct memtype *)v; | |
998 | ||
999 | seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type), | |
1000 | print_entry->start, print_entry->end); | |
1001 | kfree(print_entry); | |
ad2cde16 | 1002 | |
fec0962e | 1003 | return 0; |
1004 | } | |
1005 | ||
d535e431 | 1006 | static const struct seq_operations memtype_seq_ops = { |
fec0962e | 1007 | .start = memtype_seq_start, |
1008 | .next = memtype_seq_next, | |
1009 | .stop = memtype_seq_stop, | |
1010 | .show = memtype_seq_show, | |
1011 | }; | |
1012 | ||
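
The four seq_operations above follow the standard start/next/show/stop iterator contract; seq_read() drives them in a loop roughly like the runnable userspace sketch below. A plain array stands in for the memtype list, and this shows only the shape of the protocol, not the kernel's actual buffering or locking:

```c
#include <stdio.h>

static const char *items[] = { "write-back", "uncached-minus", "write-combining" };
#define NITEMS 3

/* start/next return an opaque cursor (NULL == done); show emits one record. */
static const void *demo_start(long long *pos)
{
	return *pos < NITEMS ? (const void *)&items[*pos] : NULL;
}
static const void *demo_next(long long *pos)
{
	++*pos;
	return demo_start(pos);
}
static void demo_show(const void *v)
{
	printf("%s\n", *(const char * const *)v);
}
static void demo_stop(void)
{
	/* a real implementation would drop locks/buffers here */
}

int main(void)
{
	long long pos = 0;
	const void *v;

	for (v = demo_start(&pos); v != NULL; v = demo_next(&pos))
		demo_show(v);
	demo_stop();
	return 0;
}
```
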
1013 | static int memtype_seq_open(struct inode *inode, struct file *file) | |
1014 | { | |
1015 | return seq_open(file, &memtype_seq_ops); | |
1016 | } | |
1017 | ||
1018 | static const struct file_operations memtype_fops = { | |
1019 | .open = memtype_seq_open, | |
1020 | .read = seq_read, | |
1021 | .llseek = seq_lseek, | |
1022 | .release = seq_release, | |
1023 | }; | |
1024 | ||
1025 | static int __init pat_memtype_list_init(void) | |
1026 | { | |
1027 | debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir, | |
1028 | NULL, &memtype_fops); | |
1029 | return 0; | |
1030 | } | |
1031 | ||
1032 | late_initcall(pat_memtype_list_init); | |
1033 | ||
012f09e7 | 1034 | #endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */ |
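
Once this initcall has run on a PAT-enabled kernel with debugfs mounted, the list can be read from userspace. arch_debugfs_dir normally corresponds to /sys/kernel/debug/x86 (path assumed here), so a minimal reader looks like:

```c
#include <stdio.h>

int main(void)
{
	/* Path assumed: arch_debugfs_dir is typically /sys/kernel/debug/x86. */
	const char *path = "/sys/kernel/debug/x86/pat_memtype_list";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);	/* needs root and a mounted debugfs */
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
```
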