arch/x86/mm/pat.c

/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"
#include "mm_internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "" fmt

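/* Set once the boot CPU has programmed PAT in pat_init(); after that, PAT can no longer be disabled. */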
static bool boot_cpu_done;

static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
static void init_cache_modes(void);

void pat_disable(const char *reason)
{
	if (!__pat_enabled)
		return;

	if (boot_cpu_done) {
		WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
		return;
	}

	__pat_enabled = 0;
	pr_info("x86/PAT: %s\n", reason);

	init_cache_modes();
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);

bool pat_enabled(void)
{
	return !!__pat_enabled;
}
EXPORT_SYMBOL_GPL(pat_enabled);

int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags arch_1 and uncached together to keep track
 * of the memory type of pages that have a backing page struct.
 *
 * X86 PAT supports 4 different memory types:
 *  - _PAGE_CACHE_MODE_WB
 *  - _PAGE_CACHE_MODE_WC
 *  - _PAGE_CACHE_MODE_UC_MINUS
 *  - _PAGE_CACHE_MODE_WT
 *
 * _PAGE_CACHE_MODE_WB is the default type.
 */

#define _PGMT_WB		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WT		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_WB)
		return _PAGE_CACHE_MODE_WB;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_MODE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_MODE_UC_MINUS;
	else
		return _PAGE_CACHE_MODE_WT;
}

static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
	unsigned long memtype_flags;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_MODE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_MODE_WT:
		memtype_flags = _PGMT_WT;
		break;
	case _PAGE_CACHE_MODE_WB:
	default:
		memtype_flags = _PGMT_WB;
		break;
	}

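	/* Atomically swap in the new memtype flag bits; retry if page->flags changed under us. */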
	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	return -1;
}
static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
}
#endif

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define CM(c) (_PAGE_CACHE_MODE_ ## c)

static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
{
	enum page_cache_mode cache;
	char *cache_mode;

	switch (pat_val) {
	case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
	case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
	case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
	case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
	case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
	case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
	default:           cache = CM(WB);       cache_mode = "WB  "; break;
	}

	memcpy(msg, cache_mode, 4);

	return cache;
}

#undef CM

/*
 * Update the cache-mode-to-pgprot translation tables according to the PAT
 * configuration.
 * Using lower indices is preferred, so we start with the highest index.
 */
static void __init_cache_modes(u64 pat)
{
	enum page_cache_mode cache;
	char pat_msg[33];
	int i;

	pat_msg[32] = 0;
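	/* pat_msg holds 8 entries x 4 characters each (e.g. "WB  ", "UC- ") plus the trailing NUL. */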
	for (i = 7; i >= 0; i--) {
		cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
					   pat_msg + 4 * i);
		update_cache_mode_entry(i, cache);
	}
	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
}

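/*
 * Illustrative note: PAT(x, y) places the hardware encoding PAT_y into byte
 * x of the 64-bit PAT MSR image; e.g. PAT(1, WC) == (u64)1 << 8, so OR-ing
 * eight PAT(slot, type) terms builds the whole MSR value.
 */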
#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))

static void pat_bsp_init(u64 pat)
{
	u64 tmp_pat;

	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		pat_disable("PAT not supported by CPU.");
		return;
	}

	rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
	if (!tmp_pat) {
		pat_disable("PAT MSR is 0, disabled.");
		return;
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);

	__init_cache_modes(pat);
}

static void pat_ap_init(u64 pat)
{
	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * If this happens we are on a secondary CPU, but switched to
		 * PAT on the boot CPU. We have no way to undo PAT.
		 */
		panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
}

static void init_cache_modes(void)
{
	u64 pat = 0;
	static int init_cm_done;

	if (init_cm_done)
		return;

	if (boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * CPU supports PAT. Set the PAT table to be consistent with
		 * the PAT MSR. This case supports the "nopat" boot option,
		 * and virtual machine environments which support PAT without
		 * MTRRs. In particular, Xen has a unique setup for the PAT MSR.
		 *
		 * If the PAT MSR returns 0, it is considered invalid and we
		 * emulate "no PAT".
		 */
		rdmsrl(MSR_IA32_CR_PAT, pat);
	}

	if (!pat) {
		/*
		 * No PAT. Emulate the PAT table that corresponds to the two
		 * cache bits, PWT (Write Through) and PCD (Cache Disable).
		 * This setup is also the same as the BIOS default setup.
		 *
		 * PTE encoding:
		 *
		 *       PCD
		 *       |PWT  PAT
		 *       ||    slot
		 *       00    0    WB : _PAGE_CACHE_MODE_WB
		 *       01    1    WT : _PAGE_CACHE_MODE_WT
		 *       10    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *       11    3    UC : _PAGE_CACHE_MODE_UC
		 *
		 * NOTE: When WC or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
	}

	__init_cache_modes(pat);

	init_cm_done = 1;
}

/**
 * pat_init - Initialize the PAT MSR and PAT table
 *
 * This function initializes the PAT MSR and PAT table with an OS-defined
 * value to enable the additional cache attributes, WC and WT.
 *
 * This function must be called on all CPUs using the specific sequence of
 * operations defined in the Intel SDM. mtrr_rendezvous_handler() provides
 * this procedure for PAT.
 */
void pat_init(void)
{
	u64 pat;
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (!pat_enabled()) {
		init_cache_modes();
		return;
	}

	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
	     ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
		/*
		 * PAT support with the lower four entries only. Intel
		 * Pentium 2, 3, M, and 4 are affected by PAT errata, which
		 * make the upper four entries unusable. To be on the safe
		 * side, we don't use those.
		 *
		 * PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 * PAT bit unused
		 *
		 * NOTE: When WT or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
	} else {
		/*
		 * Full PAT support. We put WT in slot 7 to improve
		 * robustness in the presence of errata that might cause
		 * the high PAT bit to be ignored. This way, a buggy slot 7
		 * access will hit slot 3, and slot 3 is UC, so at worst
		 * we lose performance without causing a correctness issue.
		 * Pentium 4 erratum N46 is an example of such an erratum,
		 * although we try not to use PAT at all on affected CPUs.
		 *
		 * PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *      100    4    WB : Reserved
		 *      101    5    WC : Reserved
		 *      110    6    UC-: Reserved
		 *      111    7    WT : _PAGE_CACHE_MODE_WT
		 *
		 * The reserved slots are unused, but mapped to their
		 * corresponding types in the presence of PAT errata.
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, WT);
	}

	if (!boot_cpu_done) {
		pat_bsp_init(pat);
		boot_cpu_done = true;
	} else {
		pat_ap_init(pat);
	}
}

#undef PAT

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the effective memory type as PAT understands it.
 * (The PAT and MTRR encodings do not use the same values.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM, Vol. 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
				     enum page_cache_mode req_type)
{
	/*
	 * Look for an MTRR hint to get the effective type in case the PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_MODE_WB) {
		u8 mtrr_type, uniform;

		mtrr_type = mtrr_type_lookup(start, end, &uniform);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_MODE_UC_MINUS;

		return _PAGE_CACHE_MODE_WB;
	}

	return req_type;
}

struct pagerange_state {
	unsigned long cur_pfn;
	int ram;
	int not_ram;
};

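/*
 * walk_system_ram_range() callback: records whether the walked range
 * contains RAM and/or non-RAM holes, and stops the walk early (non-zero
 * return) once both have been seen, i.e. the range is mixed.
 */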
static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram |= initial_pfn > state->cur_pfn;
	state->ram |= total_nr_pages > 0;
	state->cur_pfn = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}

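/*
 * Returns 1 if the whole tracked range is RAM, 0 if none of it is, and -1
 * if the range mixes RAM and non-RAM pages.
 */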
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, the physical address range in the legacy ISA
	 * region is tracked as non-RAM. This allows users of /dev/mem to
	 * map portions of the legacy ISA region, even when some of those
	 * portions are listed (or not even listed) with different e820
	 * types (RAM/reserved/...).
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}

/*
 * For RAM pages, we use the page flags to mark the pages with the
 * appropriate memory type. The page flags are limited to four types:
 * WB (default), WC, WT and UC-. A WP request fails with -EINVAL, and UC
 * gets redirected to UC-. Setting a new memory type is only allowed for
 * a page currently mapped with the default WB type.
 *
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, looking for any conflicts.
 * - In case of no conflicts, set the new memtype for the pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end,
				  enum page_cache_mode req_type,
				  enum page_cache_mode *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_MODE_WP) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_UC_MINUS;
		return -EINVAL;
	}

	if (req_type == _PAGE_CACHE_MODE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_MODE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		enum page_cache_mode type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != _PAGE_CACHE_MODE_WB) {
			pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
				start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, _PAGE_CACHE_MODE_WB);
	}
	return 0;
}

/*
 * req_type typically has one of:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 * - _PAGE_CACHE_MODE_WT
 *
 * If new_type is NULL, the function returns an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function returns
 * the available type in *new_type in case of no error. In case of any
 * error it returns a negative value.
 */
int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
		    enum page_cache_mode *new_type)
{
	struct memtype *new;
	enum page_cache_mode actual_type;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled()) {
		/* This is identical to page table setting without PAT */
		if (new_type)
			*new_type = req_type;
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use a WB request for WB memory and
	 * use UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = reserve_ram_pages_type(start, end, req_type, new_type);
		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start = start;
	new->end = end;
	new->type = actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
			start, end - 1,
			cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled())
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = free_ram_pages_type(start, end);
		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (IS_ERR(entry)) {
		pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address whose memory type needs to be looked up
 *
 * Only to be called when PAT is enabled.
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_WT.
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
{
	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;

		page = pfn_to_page(paddr >> PAGE_SHIFT);
		return get_page_memtype(page);
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_MODE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with the requested type. On success, the
 * requested type or any other compatible type that was available for the
 * region is returned.
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			enum page_cache_mode *type)
{
	resource_size_t size = end - start;
	enum page_cache_mode req_type = *type;
	enum page_cache_mode new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled())
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			pr_info("x86/PAT: Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
				current->comm, from, to - 1);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

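	/* Mappings opened with O_DSYNC are given an uncached-minus (UC-) attribute. */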
	if (file->f_flags & O_DSYNC)
		pcm = _PAGE_CACHE_MODE_UC_MINUS;

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     cachemode2protval(pcm));
	return 1;
}

/*
 * Change the memory type for the physical address range in the kernel
 * identity mapping space if that range is a part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size,
			    enum page_cache_mode pcm)
{
	unsigned long id_sz;

	if (base > __pa(high_memory-1))
		return 0;

	/*
	 * Some areas in the middle of the kernel identity range
	 * are not mapped, like the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))
		return 0;

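	/* Clamp the size so we only touch the part of the range covered by the identity map. */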
	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(pcm),
			base, (unsigned long long)(base + size-1));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only. After a successful reserve_memtype(),
 * this function also keeps the identity mapping (if any) in sync with
 * the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
			     int strict_prot)
{
	int is_ram = 0;
	int ret;
	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
	enum page_cache_mode pcm = want_pcm;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of the number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of the first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled())
			return 0;

		pcm = lookup_memtype(paddr);
		if (want_pcm != pcm) {
			pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					     (~_PAGE_CACHE_MASK)) |
					     cachemode2protval(pcm));
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
	if (ret)
		return ret;

	if (pcm != want_pcm) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
			free_memtype(paddr, paddr + size);
			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
			       current->comm, current->pid,
			       cattr_name(want_pcm),
			       (unsigned long long)paddr,
			       (unsigned long long)(paddr + size - 1),
			       cattr_name(pcm));
			return -EINVAL;
		}
		/*
		 * In the non-strict case we allow returning a different
		 * type than the one requested.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				     (~_PAGE_CACHE_MASK)) |
				     cachemode2protval(pcm));
	}

	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_copy is called when the vma covering the pfnmap gets copied
 * through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * Reserve the whole chunk covered by the vma. We need the
		 * starting address and protection from the pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range
 * with a single reserve_pfn_range() call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	enum page_cache_mode pcm;

	/* reserve the whole chunk starting from paddr */
	if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
		int ret;

		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (!ret)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	if (!pat_enabled())
		return 0;

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	pcm = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (pcm != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
		     pfn_t pfn)
{
	enum page_cache_mode pcm;

	if (!pat_enabled())
		return 0;

	/* Set prot based on lookup */
	pcm = lookup_memtype(pfn_t_to_phys(pfn));
	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size,
 * or for the entire vma (in which case pfn and size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;
	unsigned long prot;

	if (!(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return;
		}

		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	vma->vm_flags &= ~VM_PAT;
}

/*
 * untrack_pfn_moved is called, while mremapping a pfnmap for a new region,
 * with the old vma after its pfnmap page table has been removed. The new
 * vma has a new pfnmap to the same pfn & cache type with VM_PAT set.
 */
void untrack_pfn_moved(struct vm_area_struct *vma)
{
	vma->vm_flags &= ~VM_PAT;
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
			cachemode2protval(_PAGE_CACHE_MODE_WC));
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

pgprot_t pgprot_writethrough(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
			cachemode2protval(_PAGE_CACHE_MODE_WT));
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

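/*
 * Copy out the pos-th memtype entry under memtype_lock so the seq_file
 * iterator below can print it without holding the lock.
 */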
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_puts(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
		   print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next = memtype_seq_next,
	.stop = memtype_seq_stop,
	.show = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open = memtype_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	if (pat_enabled()) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */