x86: Use new cache mode type in setting page attributes
[linux-2.6-block.git] / arch/x86/mm/pat.c
/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
        pat_disable("PAT support disabled.");
        return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
        (void)reason;
}
#endif


int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
        pat_debug_enable = 1;
        return 0;
}
__setup("debugpat", pat_debug_setup);

static u64 __read_mostly boot_pat_state;

enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
};

#define PAT(x, y)       ((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
        u64 pat;
        bool boot_cpu = !boot_pat_state;

        if (!pat_enabled)
                return;

        if (!cpu_has_pat) {
                if (!boot_pat_state) {
                        pat_disable("PAT not supported by CPU.");
                        return;
                } else {
                        /*
                         * If this happens we are on a secondary CPU, but
                         * switched to PAT on the boot CPU. We have no way to
                         * undo PAT.
                         */
                        printk(KERN_ERR "PAT enabled, "
                               "but not supported by secondary CPU\n");
                        BUG();
                }
        }

        /* Set PWT to Write-Combining. All other bits stay the same */
        /*
         * PTE encoding used in Linux:
         *      PAT
         *      |PCD
         *      ||PWT
         *      |||
         *      000 WB          _PAGE_CACHE_WB
         *      001 WC          _PAGE_CACHE_WC
         *      010 UC-         _PAGE_CACHE_UC_MINUS
         *      011 UC          _PAGE_CACHE_UC
         * PAT bit unused
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

        /* Boot CPU check */
        if (!boot_pat_state)
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

        wrmsrl(MSR_IA32_CR_PAT, pat);

        if (boot_cpu)
                printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
                       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

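/*
 * Illustrative sketch (not part of the original pat.c): how the PTE
 * attribute bits select an entry of the IA32_PAT MSR value that pat_init()
 * programs above. The helper name is made up for illustration only.
 */
static inline unsigned int example_pat_index(bool pat_bit, bool pcd, bool pwt)
{
        /*
         * Index = PAT<<2 | PCD<<1 | PWT. With the value written by
         * pat_init(), index 0 selects WB, 1 WC, 2 UC- and 3 UC
         * (entries 4-7 repeat the same types).
         */
        return (pat_bit << 2) | (pcd << 1) | pwt;
}
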
static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype accesses */

/*
 * Does the intersection of the PAT memory type and the MTRR memory type and
 * returns the resulting memory type as PAT understands it.
 * (The type values used by PAT and by the MTRRs are not the same.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
        /*
         * Look for the MTRR hint to get the effective type in case the
         * PAT request is for WB.
         */
        if (req_type == _PAGE_CACHE_WB) {
                u8 mtrr_type;

                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type != MTRR_TYPE_WRBACK)
                        return _PAGE_CACHE_UC_MINUS;

                return _PAGE_CACHE_WB;
        }

        return req_type;
}

struct pagerange_state {
        unsigned long           cur_pfn;
        int                     ram;
        int                     not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
        struct pagerange_state *state = arg;

        state->not_ram  |= initial_pfn > state->cur_pfn;
        state->ram      |= total_nr_pages > 0;
        state->cur_pfn   = initial_pfn + total_nr_pages;

        return state->ram && state->not_ram;
}

static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
        int ret = 0;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct pagerange_state state = {start_pfn, 0, 0};

        /*
         * For legacy reasons, physical address ranges in the legacy ISA
         * region are tracked as non-RAM. This allows users of /dev/mem
         * to map portions of the legacy ISA region, even when some of
         * those portions are listed (or not listed at all) with different
         * e820 types (RAM/reserved/...).
         */
        if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
                start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

        if (start_pfn < end_pfn) {
                ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
                                &state, pagerange_is_ram_callback);
        }

        return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}

/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * type. Here we make two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for the pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
                                  unsigned long *new_type)
{
        struct page *page;
        u64 pfn;

        if (req_type == _PAGE_CACHE_UC) {
                /* We do not support strong UC */
                WARN_ON_ONCE(1);
                req_type = _PAGE_CACHE_UC_MINUS;
        }

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                unsigned long type;

                page = pfn_to_page(pfn);
                type = get_page_memtype(page);
                if (type != -1) {
                        printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
                               start, end - 1, type, req_type);
                        if (new_type)
                                *new_type = type;

                        return -EBUSY;
                }
        }

        if (new_type)
                *new_type = req_type;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                set_page_memtype(page, req_type);
        }
        return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
        struct page *page;
        u64 pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                set_page_memtype(page, -1);
        }
        return 0;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * If new_type is NULL, the function returns an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, the function returns the
 * available type in *new_type in case of no error. In case of any error
 * it returns a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *new_type)
{
        struct memtype *new;
        unsigned long actual_type;
        int is_range_ram;
        int err = 0;

        BUG_ON(start >= end); /* end is exclusive */

        if (!pat_enabled) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
                        if (req_type == _PAGE_CACHE_WC)
                                *new_type = _PAGE_CACHE_UC_MINUS;
                        else
                                *new_type = req_type & _PAGE_CACHE_MASK;
                }
                return 0;
        }

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (x86_platform.is_untracked_pat_range(start, end)) {
                if (new_type)
                        *new_type = _PAGE_CACHE_WB;
                return 0;
        }

        /*
         * Call mtrr_lookup to get the type hint. This is an
         * optimization for /dev/mem mmap'ers into WB memory (BIOS
         * tools and ACPI tools). Use WB request for WB memory and use
         * UC_MINUS otherwise.
         */
        actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

        if (new_type)
                *new_type = actual_type;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {

                err = reserve_ram_pages_type(start, end, req_type, new_type);

                return err;
        } else if (is_range_ram < 0) {
                return -EINVAL;
        }

        new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start      = start;
        new->end        = end;
        new->type       = actual_type;

        spin_lock(&memtype_lock);

        err = rbt_memtype_check_insert(new, new_type);
        if (err) {
                printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
                       start, end - 1,
                       cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);

                return err;
        }

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
                start, end - 1, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");

        return err;
}

int free_memtype(u64 start, u64 end)
{
        int err = -EINVAL;
        int is_range_ram;
        struct memtype *entry;

        if (!pat_enabled)
                return 0;

        /* Low ISA region is always mapped WB. No need to track */
        if (x86_platform.is_untracked_pat_range(start, end))
                return 0;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {

                err = free_ram_pages_type(start, end);

                return err;
        } else if (is_range_ram < 0) {
                return -EINVAL;
        }

        spin_lock(&memtype_lock);
        entry = rbt_memtype_erase(start, end);
        spin_unlock(&memtype_lock);

        if (!entry) {
                printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
                       current->comm, current->pid, start, end - 1);
                return -EINVAL;
        }

        kfree(entry);

        dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

        return 0;
}

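/*
 * Illustrative sketch (not part of the original pat.c): how a caller
 * typically pairs reserve_memtype()/free_memtype() around a non-RAM
 * physical range it wants to map write-combining. The function name and
 * the way the range is obtained are made up for illustration only.
 */
static int example_reserve_wc_range(u64 phys, unsigned long size)
{
        unsigned long got_type;
        int ret;

        ret = reserve_memtype(phys, phys + size, _PAGE_CACHE_WC, &got_type);
        if (ret)
                return ret;     /* a conflicting reservation already exists */

        if (got_type != _PAGE_CACHE_WC) {
                /* only a different (compatible) type could be granted */
                free_memtype(phys, phys + size);
                return -EINVAL;
        }

        /* ... map and use the range with the granted type ... */

        free_memtype(phys, phys + size);
        return 0;
}
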

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which the memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_UC
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
{
        enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
        struct memtype *entry;

        if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
                return rettype;

        if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
                struct page *page;

                page = pfn_to_page(paddr >> PAGE_SHIFT);
                rettype = pgprot2cachemode(__pgprot(get_page_memtype(page)));
                /*
                 * -1 from get_page_memtype() implies the RAM page is in its
                 * default state and not reserved, and hence of type WB
                 */
                if (rettype == -1)
                        rettype = _PAGE_CACHE_MODE_WB;

                return rettype;
        }

        spin_lock(&memtype_lock);

        entry = rbt_memtype_lookup(paddr);
        if (entry != NULL)
                rettype = pgprot2cachemode(__pgprot(entry->type));
        else
                rettype = _PAGE_CACHE_MODE_UC_MINUS;

        spin_unlock(&memtype_lock);
        return rettype;
}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
                        enum page_cache_mode *type)
{
        resource_size_t size = end - start;
        enum page_cache_mode req_type = *type;
        enum page_cache_mode new_type;
        unsigned long new_prot;
        int ret;

        WARN_ON_ONCE(iomem_map_sanity_check(start, size));

        ret = reserve_memtype(start, end, cachemode2protval(req_type),
                              &new_prot);
        if (ret)
                goto out_err;

        new_type = pgprot2cachemode(__pgprot(new_prot));

        if (!is_new_memtype_allowed(start, size, req_type, new_type))
                goto out_free;

        if (kernel_map_sync_memtype(start, size, new_prot) < 0)
                goto out_free;

        *type = new_type;
        return 0;

out_free:
        free_memtype(start, end);
        ret = -EBUSY;
out_err:
        return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
        free_memtype(start, end);
}

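/*
 * Illustrative sketch (not part of the original pat.c): requesting a WC
 * memtype for an MMIO window via io_reserve_memtype(). The helper name and
 * the way start/size are obtained are made up for illustration only.
 */
static int example_io_reserve_wc(resource_size_t start, resource_size_t size)
{
        enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
        int ret;

        ret = io_reserve_memtype(start, start + size, &pcm);
        if (ret)
                return ret;

        /* pcm now holds the type actually granted (WC or a compatible one) */
        /* ... set up and use the mapping ..., then drop the reservation: */
        io_free_memtype(start, start + size);
        return 0;
}
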
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        if (!pat_enabled)
                return 1;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
                               current->comm, from, to - 1);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                 unsigned long size, pgprot_t *vma_prot)
{
        unsigned long flags = _PAGE_CACHE_WB;

        if (!range_is_allowed(pfn, size))
                return 0;

        if (file->f_flags & O_DSYNC)
                flags = _PAGE_CACHE_UC_MINUS;

#ifdef CONFIG_X86_32
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_enabled &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        }
#endif

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             flags);
        return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
        unsigned long id_sz;

        if (base > __pa(high_memory-1))
                return 0;

        /*
         * Some areas in the middle of the kernel identity range
         * are not mapped, like the PCI space.
         */
        if (!page_is_ram(base >> PAGE_SHIFT))
                return 0;

        id_sz = (__pa(high_memory-1) <= base + size) ?
                                __pa(high_memory) - base :
                                size;

        if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
                printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
                       "for [mem %#010Lx-%#010Lx]\n",
                       current->comm, current->pid,
                       cattr_name(flags),
                       base, (unsigned long long)(base + size-1));
                return -EINVAL;
        }
        return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype, this
 * function also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                             int strict_prot)
{
        int is_ram = 0;
        int ret;
        unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
        unsigned long flags = want_flags;

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);

        /*
         * reserve_pfn_range() for RAM pages. We do not refcount to keep
         * track of the number of mappings of RAM pages. We can assert that
         * the type requested matches the type of the first page in the range.
         */
        if (is_ram) {
                if (!pat_enabled)
                        return 0;

                flags = cachemode2protval(lookup_memtype(paddr));
                if (want_flags != flags) {
                        printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
                               current->comm, current->pid,
                               cattr_name(want_flags),
                               (unsigned long long)paddr,
                               (unsigned long long)(paddr + size - 1),
                               cattr_name(flags));
                        *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                              (~_PAGE_CACHE_MASK)) |
                                             flags);
                }
                return 0;
        }

        ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
        if (ret)
                return ret;

        if (flags != want_flags) {
                if (strict_prot ||
                    !is_new_memtype_allowed(paddr, size,
                                pgprot2cachemode(__pgprot(want_flags)),
                                pgprot2cachemode(__pgprot(flags)))) {
                        free_memtype(paddr, paddr + size);
                        printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
                               " for [mem %#010Lx-%#010Lx], got %s\n",
                               current->comm, current->pid,
                               cattr_name(want_flags),
                               (unsigned long long)paddr,
                               (unsigned long long)(paddr + size - 1),
                               cattr_name(flags));
                        return -EINVAL;
                }
                /*
                 * We allow returning a different type than the one requested
                 * in the non-strict case.
                 */
                *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                      (~_PAGE_CACHE_MASK)) |
                                     flags);
        }

        if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
                free_memtype(paddr, paddr + size);
                return -EINVAL;
        }
        return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
        int is_ram;

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);
        if (is_ram == 0)
                free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_copy is called when the vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
        resource_size_t paddr;
        unsigned long prot;
        unsigned long vma_size = vma->vm_end - vma->vm_start;
        pgprot_t pgprot;

        if (vma->vm_flags & VM_PAT) {
                /*
                 * reserve the whole chunk covered by the vma. We need the
                 * starting address and protection from the pte.
                 */
                if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
                        WARN_ON_ONCE(1);
                        return -EINVAL;
                }
                pgprot = __pgprot(prot);
                return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
        }

        return 0;
}

/*
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range with
 * a single reserve_pfn_range call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                    unsigned long pfn, unsigned long addr, unsigned long size)
{
        resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
        enum page_cache_mode pcm;

        /* reserve the whole chunk starting from paddr */
        if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
                int ret;

                ret = reserve_pfn_range(paddr, size, prot, 0);
                if (!ret)
                        vma->vm_flags |= VM_PAT;
                return ret;
        }

        if (!pat_enabled)
                return 0;

        /*
         * For anything smaller than the vma size we set prot based on the
         * lookup.
         */
        pcm = lookup_memtype(paddr);

        /* Check memtype for the remaining pages */
        while (size > PAGE_SIZE) {
                size -= PAGE_SIZE;
                paddr += PAGE_SIZE;
                if (pcm != lookup_memtype(paddr))
                        return -EINVAL;
        }

        *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
                         cachemode2protval(pcm));

        return 0;
}

int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
                     unsigned long pfn)
{
        enum page_cache_mode pcm;

        if (!pat_enabled)
                return 0;

        /* Set prot based on lookup */
        pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
        *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
                         cachemode2protval(pcm));

        return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn, size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
                 unsigned long size)
{
        resource_size_t paddr;
        unsigned long prot;

        if (!(vma->vm_flags & VM_PAT))
                return;

        /* free the chunk starting from pfn or the whole chunk */
        paddr = (resource_size_t)pfn << PAGE_SHIFT;
        if (!paddr && !size) {
                if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
                        WARN_ON_ONCE(1);
                        return;
                }

                size = vma->vm_end - vma->vm_start;
        }
        free_pfn_range(paddr, size);
        vma->vm_flags &= ~VM_PAT;
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
        if (pat_enabled)
                return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
        else
                return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

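/*
 * Illustrative sketch (not part of the original pat.c): how a driver's mmap
 * handler typically combines pgprot_writecombine() with the track_pfn_*
 * machinery above (remap_pfn_range() calls track_pfn_remap() internally, and
 * untrack_pfn() releases the reservation when the mapping goes away). The
 * function name and the pfn argument are made up for illustration only.
 */
static int example_mmap_wc(struct vm_area_struct *vma, unsigned long pfn)
{
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        return remap_pfn_range(vma, vma->vm_start, pfn,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
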
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

static struct memtype *memtype_get_idx(loff_t pos)
{
        struct memtype *print_entry;
        int ret;

        print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!print_entry)
                return NULL;

        spin_lock(&memtype_lock);
        ret = rbt_memtype_copy_nth_element(print_entry, pos);
        spin_unlock(&memtype_lock);

        if (!ret) {
                return print_entry;
        } else {
                kfree(print_entry);
                return NULL;
        }
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) {
                ++*pos;
                seq_printf(seq, "PAT memtype list:\n");
        }

        return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
        struct memtype *print_entry = (struct memtype *)v;

        seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
                   print_entry->start, print_entry->end);
        kfree(print_entry);

        return 0;
}

static const struct seq_operations memtype_seq_ops = {
        .start = memtype_seq_start,
        .next  = memtype_seq_next,
        .stop  = memtype_seq_stop,
        .show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
        .open    = memtype_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
        if (pat_enabled) {
                debugfs_create_file("pat_memtype_list", S_IRUSR,
                                    arch_debugfs_dir, NULL, &memtype_fops);
        }
        return 0;
}

late_initcall(pat_memtype_list_init);
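
/*
 * Note (not part of the original pat.c): with CONFIG_DEBUG_FS and
 * CONFIG_X86_PAT enabled, the list created above is readable (assuming
 * debugfs is mounted at its usual location) as
 * /sys/kernel/debug/x86/pat_memtype_list; each line is printed by
 * memtype_seq_show() as "<type> @ 0x<start>-0x<end>".
 */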

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */