x86/cpa: make sure cpa is safe to call in lazy mmu mode
[linux-2.6-block.git] / arch/x86/mm/pat.c
/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

void __cpuinit pat_disable(char *reason)
{
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
        pat_disable("PAT support disabled.");
        return 0;
}
early_param("nopat", nopat);
#endif

static int debug_enable;

static int __init pat_debug_setup(char *str)
{
        debug_enable = 1;
        return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
        do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
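
/*
 * Example (added for illustration, not part of the original source): booting
 * with "debugpat" on the kernel command line sets debug_enable, after which
 * dprintk() behaves like printk(KERN_INFO ...); otherwise the message is
 * suppressed at run time:
 *
 *      dprintk("Overlap at 0x%Lx-0x%Lx\n", entry->start, entry->end);
 */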

static u64 __read_mostly boot_pat_state;

enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
};

#define PAT(x, y)       ((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
        u64 pat;

        if (!pat_enabled)
                return;

        /* Paranoia check. */
        if (!cpu_has_pat && boot_pat_state) {
                /*
                 * If this happens we are on a secondary CPU, but switched to
                 * PAT on the boot CPU. We have no way to undo PAT.
                 */
                printk(KERN_ERR "PAT enabled, "
                       "but not supported by secondary CPU\n");
                BUG();
        }

        /* Set PWT to Write-Combining. All other bits stay the same */
        /*
         * PTE encoding used in Linux:
         *      PAT
         *      |PCD
         *      ||PWT
         *      |||
         *      000 WB          _PAGE_CACHE_WB
         *      001 WC          _PAGE_CACHE_WC
         *      010 UC-         _PAGE_CACHE_UC_MINUS
         *      011 UC          _PAGE_CACHE_UC
         * PAT bit unused
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

        /* Boot CPU check */
        if (!boot_pat_state)
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

        wrmsrl(MSR_IA32_CR_PAT, pat);
        printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
               smp_processor_id(), boot_pat_state, pat);
}
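
/*
 * Worked example (added for illustration): with the PAT() encoding above and
 * the enum values WB=6, WC=1, UC_MINUS=7, UC=0, the value programmed into
 * MSR_IA32_CR_PAT is, from PAT entry 0 in the low byte up to entry 7:
 *
 *      pat == 0x0007010600070106ULL
 */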

#undef PAT

static char *cattr_name(unsigned long flags)
{
        switch (flags & _PAGE_CACHE_MASK) {
        case _PAGE_CACHE_UC:            return "uncached";
        case _PAGE_CACHE_UC_MINUS:      return "uncached-minus";
        case _PAGE_CACHE_WB:            return "write-back";
        case _PAGE_CACHE_WC:            return "write-combining";
        default:                        return "broken";
        }
}

/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should become a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */

struct memtype {
        u64                     start;
        u64                     end;
        unsigned long           type;
        struct list_head        nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype list */
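
/*
 * Illustration (added; the addresses are hypothetical): two overlapping
 * reservations of the same type coexist as two sorted entries, e.g.
 *
 *      0xd0000000-0xd0100000 write-combining
 *      0xd0080000-0xd0100000 write-combining
 *
 * and each free_memtype() removes exactly one of them. A request for a
 * different type over the same range is rejected by chk_conflict() below.
 */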

/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (The type values used by PAT and MTRR do not share the same encoding.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
        /*
         * Look for MTRR hint to get the effective type in case where PAT
         * request is for WB.
         */
        if (req_type == _PAGE_CACHE_WB) {
                u8 mtrr_type;

                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type == MTRR_TYPE_UNCACHABLE)
                        return _PAGE_CACHE_UC;
                if (mtrr_type == MTRR_TYPE_WRCOMB)
                        return _PAGE_CACHE_WC;
        }

        return req_type;
}
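
/*
 * Worked example (added for illustration): a _PAGE_CACHE_WB request over a
 * range that an MTRR marks MTRR_TYPE_UNCACHABLE comes back as _PAGE_CACHE_UC,
 * and as _PAGE_CACHE_WC if the MTRR says MTRR_TYPE_WRCOMB; WC, UC- and UC
 * requests are returned unchanged.
 */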

static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
        if (new->type != entry->type) {
                if (type) {
                        new->type = entry->type;
                        *type = entry->type;
                } else
                        goto conflict;
        }

        /* check overlaps with more than one entry in the list */
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (new->end <= entry->start)
                        break;
                else if (new->type != entry->type)
                        goto conflict;
        }
        return 0;

 conflict:
        printk(KERN_INFO "%s:%d conflicting memory types "
               "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
               new->end, cattr_name(new->type), cattr_name(entry->type));
        return -EBUSY;
}

static struct memtype *cached_entry;
static u64 cached_start;

/*
 * For RAM pages, mark the pages as non WB memory type using
 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
 * set_memory_wc() on a RAM page at a time before marking it as WB again.
 * This is ok, because only one driver will be owning the page and
 * doing set_memory_*() calls.
 *
 * For now, we use PageNonWB to track that the RAM page is being mapped
 * as non WB. In future, we will have to use one more flag
 * (or some other mechanism in page_struct) to distinguish between
 * UC and WC mapping.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
                                  unsigned long *new_type)
{
        struct page *page;
        u64 pfn, end_pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                if (page_mapped(page) || PageNonWB(page))
                        goto out;

                SetPageNonWB(page);
        }
        return 0;

out:
        end_pfn = pfn;
        for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
                page = pfn_to_page(pfn);
                ClearPageNonWB(page);
        }

        return -EINVAL;
}

static int free_ram_pages_type(u64 start, u64 end)
{
        struct page *page;
        u64 pfn, end_pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                if (page_mapped(page) || !PageNonWB(page))
                        goto out;

                ClearPageNonWB(page);
        }
        return 0;

out:
        end_pfn = pfn;
        for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
                page = pfn_to_page(pfn);
                SetPageNonWB(page);
        }
        return -EINVAL;
}
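
/*
 * Usage sketch (added for illustration; the buffer is hypothetical): a driver
 * that needs a RAM buffer mapped uncached goes through set_memory_uc(), which
 * reserves the range via reserve_memtype() and thereby marks the pages
 * PageNonWB here, and undoes it with set_memory_wb():
 *
 *      if (set_memory_uc((unsigned long)vaddr, nrpages))
 *              return -EINVAL;
 *      ...
 *      set_memory_wb((unsigned long)vaddr, nrpages);
 */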

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when the requester wants to
 * inherit the memory type from mtrr (if WB), existing PAT, defaulting to
 * UC_MINUS.
 *
 * If new_type is NULL, the function will return an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function will return
 * the available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *new_type)
{
        struct memtype *new, *entry;
        unsigned long actual_type;
        struct list_head *where;
        int is_range_ram;
        int err = 0;

        BUG_ON(start >= end); /* end is exclusive */

        if (!pat_enabled) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
                        if (req_type == -1)
                                *new_type = _PAGE_CACHE_WB;
                        else
                                *new_type = req_type & _PAGE_CACHE_MASK;
                }
                return 0;
        }

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (is_ISA_range(start, end - 1)) {
                if (new_type)
                        *new_type = _PAGE_CACHE_WB;
                return 0;
        }

        if (req_type == -1) {
                /*
                 * Call mtrr_lookup to get the type hint. This is an
                 * optimization for /dev/mem mmap'ers into WB memory (BIOS
                 * tools and ACPI tools). Use WB request for WB memory and use
                 * UC_MINUS otherwise.
                 */
                u8 mtrr_type = mtrr_type_lookup(start, end);

                if (mtrr_type == MTRR_TYPE_WRBACK)
                        actual_type = _PAGE_CACHE_WB;
                else
                        actual_type = _PAGE_CACHE_UC_MINUS;
        } else {
                actual_type = pat_x_mtrr_type(start, end,
                                              req_type & _PAGE_CACHE_MASK);
        }

        if (new_type)
                *new_type = actual_type;

        /*
         * For legacy reasons, some parts of the physical address range in the
         * legacy 1MB region are treated as non-RAM (even when listed as RAM
         * in the e820 tables). So we will always track the memory attributes
         * of this legacy 1MB region using the linear memtype_list.
         */
        if (end >= ISA_END_ADDRESS) {
                is_range_ram = pagerange_is_ram(start, end);
                if (is_range_ram == 1)
                        return reserve_ram_pages_type(start, end, req_type,
                                                      new_type);
                else if (is_range_ram < 0)
                        return -EINVAL;
        }

        new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start = start;
        new->end   = end;
        new->type  = actual_type;

        spin_lock(&memtype_lock);

        if (cached_entry && start >= cached_start)
                entry = cached_entry;
        else
                entry = list_entry(&memtype_list, struct memtype, nd);

        /* Search for existing mapping that overlaps the current range */
        where = NULL;
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (end <= entry->start) {
                        where = entry->nd.prev;
                        cached_entry = list_entry(where, struct memtype, nd);
                        break;
                } else if (start <= entry->start) { /* end > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                where = entry->nd.prev;
                                cached_entry = list_entry(where,
                                                          struct memtype, nd);
                        }
                        break;
                } else if (start < entry->end) { /* start > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                cached_entry = list_entry(entry->nd.prev,
                                                          struct memtype, nd);

                                /*
                                 * Move to right position in the linked
                                 * list to add this new entry
                                 */
                                list_for_each_entry_continue(entry,
                                                             &memtype_list, nd) {
                                        if (start <= entry->start) {
                                                where = entry->nd.prev;
                                                break;
                                        }
                                }
                        }
                        break;
                }
        }

        if (err) {
                printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
                       "track %s, req %s\n",
                       start, end, cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);

                return err;
        }

        cached_start = start;

        if (where)
                list_add(&new->nd, where);
        else
                list_add_tail(&new->nd, &memtype_list);

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
                start, end, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");

        return err;
}

int free_memtype(u64 start, u64 end)
{
        struct memtype *entry;
        int err = -EINVAL;
        int is_range_ram;

        if (!pat_enabled)
                return 0;

        /* Low ISA region is always mapped WB. No need to track */
        if (is_ISA_range(start, end - 1))
                return 0;

        /*
         * For legacy reasons, some parts of the physical address range in the
         * legacy 1MB region are treated as non-RAM (even when listed as RAM
         * in the e820 tables). So we will always track the memory attributes
         * of this legacy 1MB region using the linear memtype_list.
         */
        if (end >= ISA_END_ADDRESS) {
                is_range_ram = pagerange_is_ram(start, end);
                if (is_range_ram == 1)
                        return free_ram_pages_type(start, end);
                else if (is_range_ram < 0)
                        return -EINVAL;
        }

        spin_lock(&memtype_lock);
        list_for_each_entry(entry, &memtype_list, nd) {
                if (entry->start == start && entry->end == end) {
                        if (cached_entry == entry || cached_start == start)
                                cached_entry = NULL;

                        list_del(&entry->nd);
                        kfree(entry);
                        err = 0;
                        break;
                }
        }
        spin_unlock(&memtype_lock);

        if (err) {
                printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
                       current->comm, current->pid, start, end);
        }

        dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

        return err;
}
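
/*
 * Usage sketch (added for illustration; base, size and the calling driver are
 * hypothetical): an MMIO range is typically reserved, checked against the
 * type actually granted, and released again on teardown:
 *
 *      unsigned long flags;
 *
 *      if (reserve_memtype(base, base + size, _PAGE_CACHE_WC, &flags))
 *              return -EBUSY;
 *      if (flags != _PAGE_CACHE_WC) {
 *              free_memtype(base, base + size);
 *              return -EBUSY;
 *      }
 *      ...
 *      free_memtype(base, base + size);
 */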

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        if (!pat_enabled)
                return 1;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                 unsigned long size, pgprot_t *vma_prot)
{
        u64 offset = ((u64) pfn) << PAGE_SHIFT;
        unsigned long flags = -1;
        int retval;

        if (!range_is_allowed(pfn, size))
                return 0;

        if (file->f_flags & O_SYNC) {
                flags = _PAGE_CACHE_UC_MINUS;
        }

#ifdef CONFIG_X86_32
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_enabled &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        }
#endif

        /*
         * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
         *
         * Without O_SYNC, we want to get
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        if (flags != -1) {
                retval = reserve_memtype(offset, offset + size, flags, NULL);
        } else {
                retval = reserve_memtype(offset, offset + size, -1, &flags);
        }

        if (retval < 0)
                return 0;

        if (((pfn < max_low_pfn_mapped) ||
             (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
            ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
                free_memtype(offset, offset + size);
                printk(KERN_INFO
                "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
                        current->comm, current->pid,
                        cattr_name(flags),
                        offset, (unsigned long long)(offset + size));
                return 0;
        }

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             flags);
        return 1;
}

void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
        u64 addr = (u64)pfn << PAGE_SHIFT;
        unsigned long flags;

        reserve_memtype(addr, addr + size, want_flags, &flags);
        if (flags != want_flags) {
                printk(KERN_INFO
                "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
                        current->comm, current->pid,
                        cattr_name(want_flags),
                        addr, (unsigned long long)(addr + size),
                        cattr_name(flags));
        }
}

void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        u64 addr = (u64)pfn << PAGE_SHIFT;

        free_memtype(addr, addr + size);
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only, and after a successful reserve_memtype,
 * this func also keeps identity mapping (if any) in sync with this new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                             int strict_prot)
{
        int is_ram = 0;
        int id_sz, ret;
        unsigned long flags;
        unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);

        is_ram = pagerange_is_ram(paddr, paddr + size);

        if (is_ram != 0) {
                /*
                 * For mapping RAM pages, drivers need to call
                 * set_memory_[uc|wc|wb] directly, for reserve and free, before
                 * setting up the PTE.
                 */
                WARN_ON_ONCE(1);
                return 0;
        }

        ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
        if (ret)
                return ret;

        if (flags != want_flags) {
                if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
                        free_memtype(paddr, paddr + size);
                        printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
                                " for %Lx-%Lx, got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_flags),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size),
                                cattr_name(flags));
                        return -EINVAL;
                }
                /*
                 * We allow returning a different type than the one requested
                 * in the non-strict case.
                 */
                *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                      (~_PAGE_CACHE_MASK)) |
                                     flags);
        }

        /* Need to keep identity mapping in sync */
        if (paddr >= __pa(high_memory))
                return 0;

        id_sz = (__pa(high_memory) < paddr + size) ?
                                __pa(high_memory) - paddr :
                                size;

        if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
                free_memtype(paddr, paddr + size);
                printk(KERN_ERR
                        "%s:%d reserve_pfn_range ioremap_change_attr failed %s "
                        "for %Lx-%Lx\n",
                        current->comm, current->pid,
                        cattr_name(flags),
                        (unsigned long long)paddr,
                        (unsigned long long)(paddr + size));
                return -EINVAL;
        }
        return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
        int is_ram;

        is_ram = pagerange_is_ram(paddr, paddr + size);
        if (is_ram == 0)
                free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when a vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range call. Otherwise, we reserve the entire vma range by going
 * through the PTEs page by page to get the physical address and protection.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
        int retval = 0;
        unsigned long i, j;
        resource_size_t paddr;
        unsigned long prot;
        unsigned long vma_start = vma->vm_start;
        unsigned long vma_end = vma->vm_end;
        unsigned long vma_size = vma_end - vma_start;
        pgprot_t pgprot;

        if (!pat_enabled)
                return 0;

        if (is_linear_pfn_mapping(vma)) {
                /*
                 * reserve the whole chunk covered by vma. We need the
                 * starting address and protection from pte.
                 */
                if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
                        WARN_ON_ONCE(1);
                        return -EINVAL;
                }
                pgprot = __pgprot(prot);
                return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
        }

        /* reserve entire vma page by page, using pfn and prot from pte */
        for (i = 0; i < vma_size; i += PAGE_SIZE) {
                if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
                        continue;

                pgprot = __pgprot(prot);
                retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
                if (retval)
                        goto cleanup_ret;
        }
        return 0;

cleanup_ret:
        /* Reserve error: Cleanup partial reservation and return error */
        for (j = 0; j < i; j += PAGE_SIZE) {
                if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
                        continue;

                free_pfn_range(paddr, PAGE_SIZE);
        }

        return retval;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, we reserve the entire vma range
 * with a single reserve_pfn_range call.
 * Otherwise, we look at the pfn and size and reserve only the specified range
 * page by page.
 *
 * Note that this function can be called with the caller trying to map only a
 * subrange/page inside the vma.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
                      unsigned long pfn, unsigned long size)
{
        int retval = 0;
        unsigned long i, j;
        resource_size_t base_paddr;
        resource_size_t paddr;
        unsigned long vma_start = vma->vm_start;
        unsigned long vma_end = vma->vm_end;
        unsigned long vma_size = vma_end - vma_start;

        if (!pat_enabled)
                return 0;

        if (is_linear_pfn_mapping(vma)) {
                /* reserve the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                return reserve_pfn_range(paddr, vma_size, prot, 0);
        }

        /* reserve page by page using pfn and size */
        base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
        for (i = 0; i < size; i += PAGE_SIZE) {
                paddr = base_paddr + i;
                retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
                if (retval)
                        goto cleanup_ret;
        }
        return 0;

cleanup_ret:
        /* Reserve error: Cleanup partial reservation and return error */
        for (j = 0; j < i; j += PAGE_SIZE) {
                paddr = base_paddr + j;
                free_pfn_range(paddr, PAGE_SIZE);
        }

        return retval;
}

/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
                     unsigned long size)
{
        unsigned long i;
        resource_size_t paddr;
        unsigned long prot;
        unsigned long vma_start = vma->vm_start;
        unsigned long vma_end = vma->vm_end;
        unsigned long vma_size = vma_end - vma_start;

        if (!pat_enabled)
                return;

        if (is_linear_pfn_mapping(vma)) {
                /* free the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                free_pfn_range(paddr, vma_size);
                return;
        }

        if (size != 0 && size != vma_size) {
                /* free page by page, using pfn and size */
                paddr = (resource_size_t)pfn << PAGE_SHIFT;
                for (i = 0; i < size; i += PAGE_SIZE) {
                        paddr = paddr + i;
                        free_pfn_range(paddr, PAGE_SIZE);
                }
        } else {
                /* free entire vma, page by page, using the pfn from pte */
                for (i = 0; i < vma_size; i += PAGE_SIZE) {
                        if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
                                continue;

                        free_pfn_range(paddr, PAGE_SIZE);
                }
        }
}
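
/*
 * Call-flow sketch (added for illustration): the generic mm code is the
 * caller of the three hooks above; remap_pfn_range() ends up in
 * track_pfn_vma_new(), fork()'s copy_page_range() in track_pfn_vma_copy(),
 * and unmapping the vma in untrack_pfn_vma(), so every pfnmap keeps its
 * reserve_pfn_range()/free_pfn_range() calls balanced.
 */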

pgprot_t pgprot_writecombine(pgprot_t prot)
{
        if (pat_enabled)
                return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
        else
                return pgprot_noncached(prot);
}
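
/*
 * Usage sketch (added for illustration; the fb_mmap-style caller and pfn are
 * hypothetical): a driver mapping a write-combining aperture applies this
 * helper to the vma protections before remapping:
 *
 *      vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *      return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *                                vma->vm_end - vma->vm_start,
 *                                vma->vm_page_prot);
 */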

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
        struct memtype *list_node, *print_entry;
        int i = 1;

        print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!print_entry)
                return NULL;

        spin_lock(&memtype_lock);
        list_for_each_entry(list_node, &memtype_list, nd) {
                if (pos == i) {
                        *print_entry = *list_node;
                        spin_unlock(&memtype_lock);
                        return print_entry;
                }
                ++i;
        }
        spin_unlock(&memtype_lock);
        kfree(print_entry);

        return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) {
                ++*pos;
                seq_printf(seq, "PAT memtype list:\n");
        }

        return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
        struct memtype *print_entry = (struct memtype *)v;

        seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
                   print_entry->start, print_entry->end);
        kfree(print_entry);

        return 0;
}

static struct seq_operations memtype_seq_ops = {
        .start = memtype_seq_start,
        .next  = memtype_seq_next,
        .stop  = memtype_seq_stop,
        .show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
        .open    = memtype_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
        debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
                            NULL, &memtype_fops);
        return 0;
}

late_initcall(pat_memtype_list_init);
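
/*
 * Example (added for illustration; addresses are hypothetical): reading
 * /sys/kernel/debug/x86/pat_memtype_list prints one line per tracked entry
 * in the format used by memtype_seq_show():
 *
 *      PAT memtype list:
 *      uncached-minus @ 0xfed00000-0xfed01000
 *      write-combining @ 0xd0000000-0xd0100000
 */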

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */