x86: Respect PAT bit when copying pte values between large and normal pages
linux-2.6-block.git / arch/x86/mm/pat.c
/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
	(void)reason;
}
#endif

int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

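/*
 * Each of the eight slots in the MSR_IA32_CR_PAT register is one byte
 * wide; PAT(x, y) builds the value that selects memory type y for slot x.
 */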
#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))

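/*
 * pat_init() runs on every CPU: the boot CPU records the pre-existing
 * MSR value in boot_pat_state, then each CPU is programmed with the
 * Linux PAT layout described below.
 */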
void pat_init(void)
{
	u64 pat;
	bool boot_cpu = !boot_pat_state;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		} else {
			/*
			 * If this happens we are on a secondary CPU, but
			 * switched to PAT on the boot CPU. We have no way to
			 * undo PAT.
			 */
			printk(KERN_ERR "PAT enabled, "
			       "but not supported by secondary CPU\n");
			BUG();
		}
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);

	if (boot_cpu)
		printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
		       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the resulting effective memory type as PAT understands it.
 * (The same type can have different numeric values in PAT and MTRR.)
 * The intersection is based on the "Effective Memory Type" tables in
 * the IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
				     enum page_cache_mode req_type)
{
	/*
	 * Look for the MTRR hint to get the effective type in case where
	 * the PAT request is for WB.
	 */
	if (req_type == _PAGE_CACHE_MODE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_MODE_UC_MINUS;

		return _PAGE_CACHE_MODE_WB;
	}

	return req_type;
}

struct pagerange_state {
	unsigned long cur_pfn;
	int ram;
	int not_ram;
};

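/*
 * Callback for walk_system_ram_range(): records whether any RAM pages
 * were seen and whether any non-RAM gaps were skipped, and stops the
 * walk (returns nonzero) once both have been observed.
 */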
static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram |= initial_pfn > state->cur_pfn;
	state->ram |= total_nr_pages > 0;
	state->cur_pfn = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}

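/*
 * Returns 1 if the whole range is RAM, 0 if none of it is, and -1 if it
 * mixes RAM and non-RAM pages.
 */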
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, physical address range in the legacy ISA
	 * region is tracked as non-RAM. This will allow users of
	 * /dev/mem to map portions of legacy ISA region, even when
	 * some of those portions are listed (or not even listed) with
	 * different e820 types (RAM/reserved/...).
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}

/*
 * For RAM pages, we use page flags to mark the pages with the
 * appropriate type. Here we do two passes:
 * - Find the memtype of all the pages in the range, looking for any
 *   conflicts.
 * - If there are no conflicts, set the new memtype for the pages in
 *   the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end,
				  enum page_cache_mode req_type,
				  enum page_cache_mode *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_MODE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_MODE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		enum page_cache_mode type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
				start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

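/* Return the pages in the range to their default, unreserved state. */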
static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 *
 * If new_type is NULL, the function returns an error if it cannot
 * reserve the region with req_type. If new_type is non-NULL, the
 * function returns the available type in *new_type in case of no
 * error. In case of any error it returns a negative value.
 */
int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
		    enum page_cache_mode *new_type)
{
	struct memtype *new;
	enum page_cache_mode actual_type;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == _PAGE_CACHE_MODE_WC)
				*new_type = _PAGE_CACHE_MODE_UC_MINUS;
			else
				*new_type = req_type;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = reserve_ram_pages_type(start, end, req_type, new_type);
		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start = start;
	new->end = end;
	new->type = actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
		       start, end - 1,
		       cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

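/*
 * Undo a successful reserve_memtype(): clear the per-page flags for RAM
 * ranges, or remove the rbtree entry otherwise.
 */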
int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = free_ram_pages_type(start, end);
		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (!entry) {
		printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
		       current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}


/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address whose memory type needs to be looked up
 *
 * Only to be called when PAT is enabled.
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_UC
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
{
	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;
		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = get_page_memtype(page);
		/*
		 * -1 from get_page_memtype() implies the RAM page is in its
		 * default state and not reserved, and hence of type WB
		 */
		if (rettype == -1)
			rettype = _PAGE_CACHE_MODE_WB;

		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_MODE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
		       enum page_cache_mode *type)
{
	resource_size_t size = end - start;
	enum page_cache_mode req_type = *type;
	enum page_cache_mode new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
			       current->comm, from, to - 1);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

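/*
 * Called on the /dev/mem mmap path: refuse disallowed ranges and
 * tighten *vma_prot to an uncached type where needed.
 */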
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		pcm = _PAGE_CACHE_MODE_UC_MINUS;

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		pcm = _PAGE_CACHE_MODE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     cachemode2protval(pcm));
	return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size,
			    enum page_cache_mode pcm)
{
	unsigned long id_sz;

	if (base > __pa(high_memory-1))
		return 0;

	/*
	 * Some areas in the middle of the kernel identity range
	 * are not mapped, like the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))
		return 0;

	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
			"for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(pcm),
			base, (unsigned long long)(base + size-1));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype(),
 * this function also keeps the kernel identity mapping (if any) in sync
 * with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
			     int strict_prot)
{
	int is_ram = 0;
	int ret;
	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
	enum page_cache_mode pcm = want_pcm;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of the number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of the first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled)
			return 0;

		pcm = lookup_memtype(paddr);
		if (want_pcm != pcm) {
			printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
			       current->comm, current->pid,
			       cattr_name(want_pcm),
			       (unsigned long long)paddr,
			       (unsigned long long)(paddr + size - 1),
			       cattr_name(pcm));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					      (~_PAGE_CACHE_MASK)) |
					     cachemode2protval(pcm));
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
	if (ret)
		return ret;

	if (pcm != want_pcm) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested
		 * in the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     cachemode2protval(pcm));
	}

	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_copy is called when a vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the
 * prot from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * prot is passed in as a parameter for the new mapping. If the vma has
 * a linear pfn mapping for the entire range, reserve the entire vma
 * range with a single reserve_pfn_range() call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	enum page_cache_mode pcm;

	/* reserve the whole chunk starting from paddr */
	if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
		int ret;

		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (!ret)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	if (!pat_enabled)
		return 0;

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	pcm = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (pcm != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

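/*
 * track_pfn_insert is called when a single pfn is being inserted: with
 * PAT enabled, set *prot from the memtype tracked for that pfn;
 * otherwise leave it untouched.
 */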
int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
		     unsigned long pfn)
{
	enum page_cache_mode pcm;

	if (!pat_enabled)
		return 0;

	/* Set prot based on lookup */
	pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn and size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;
	unsigned long prot;

	if (!(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return;
		}

		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	vma->vm_flags &= ~VM_PAT;
}

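/*
 * With PAT enabled, request a real write-combining mapping; without it,
 * fall back to the uncached protection from pgprot_noncached().
 */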
pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

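/*
 * Copy the pos'th memtype entry out under memtype_lock; the copy is
 * freed by memtype_seq_show() (or here, if no such entry exists).
 */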
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
		   print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

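/* Expose the tracked memtypes at <debugfs>/x86/pat_memtype_list. */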
static int __init pat_memtype_list_init(void)
{
	if (pat_enabled) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */