x86/mm/pat: Create fixed width output in /sys/kernel/debug/x86/pat_memtype_list,...
linux-block.git: arch/x86/mm/pat.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Page Attribute Table (PAT) support: handle memory caching attributes in page tables.
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 *
 * Basic principles:
 *
 * PAT is a CPU feature supported by all modern x86 CPUs, to allow the firmware and
 * the kernel to set one of a handful of 'caching type' attributes for physical
 * memory ranges: uncached, write-combining, write-through, write-protected,
 * and the most commonly used and default attribute: write-back caching.
 *
 * PAT support supersedes and augments MTRR support in a compatible fashion: MTRR is
 * a hardware interface to enumerate a limited number of physical memory ranges
 * and set their caching attributes explicitly, programmed into the CPU via MSRs.
 * Even modern CPUs have MTRRs enabled - but these are typically not touched
 * by the kernel or by user-space (such as the X server); we rely on PAT for any
 * additional cache attribute logic.
 *
 * PAT doesn't work via explicit memory ranges, but uses page table entries to add
 * cache attribute information to the mapped memory range: there are 3 bits used
 * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT), with the 8 possible values mapped by the
 * CPU to actual cache attributes via an MSR loaded into the CPU (MSR_IA32_CR_PAT).
 *
 * ( There's a metric ton of finer details, such as compatibility with CPU quirks
 *   that only support 4 types of PAT entries, and interaction with MTRRs, see
 *   below for details. )
 */
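
/*
 * Editor's illustrative sketch (not part of the original file): the three
 * PTE bits form a 3-bit index that selects one byte ('slot') of
 * MSR_IA32_CR_PAT; the low 3 bits of that byte encode the memory type:
 *
 *	index = (_PAGE_PAT ? 4 : 0) | (_PAGE_PCD ? 2 : 0) | (_PAGE_PWT ? 1 : 0);
 *	type  = (MSR_IA32_CR_PAT >> (index * 8)) & 0x7;
 *
 * With the power-on default MSR value 0x0007040600070406, index 0 yields
 * 6 (write-back) and index 3 yields 0 (uncached).
 */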

#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/debugfs.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"
#include "mm_internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "" fmt

static bool __read_mostly boot_cpu_done;
static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
static bool __read_mostly pat_initialized;
static bool __read_mostly init_cm_done;

/*
 * PAT support is enabled by default, but can be disabled for
 * various user-requested or hardware-forced reasons:
 */
void pat_disable(const char *msg_reason)
{
	if (pat_disabled)
		return;

	if (boot_cpu_done) {
		WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
		return;
	}

	pat_disabled = true;
	pr_info("x86/PAT: %s\n", msg_reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled via boot option.");
	return 0;
}
early_param("nopat", nopat);

bool pat_enabled(void)
{
	return pat_initialized;
}
EXPORT_SYMBOL_GPL(pat_enabled);

int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags arch_1 and uncached together to keep track
 * of the memory type of pages that have a backing page struct.
 *
 * X86 PAT supports 4 different memory types:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_WT
 *
 * _PAGE_CACHE_MODE_WB is the default type.
 */

#define _PGMT_WB		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WT		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_WB)
		return _PAGE_CACHE_MODE_WB;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_MODE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_MODE_UC_MINUS;
	else
		return _PAGE_CACHE_MODE_WT;
}

static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
	unsigned long memtype_flags;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_MODE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_MODE_WT:
		memtype_flags = _PGMT_WT;
		break;
	case _PAGE_CACHE_MODE_WB:
	default:
		memtype_flags = _PGMT_WB;
		break;
	}

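	/*
	 * Editor's note (comment added for clarity, not in the original):
	 * retry with cmpxchg() so the two memtype bits are updated without
	 * losing concurrent updates to the other page flags.
	 */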
	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	return -1;
}
static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
}
#endif

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};
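
/*
 * Editor's illustrative sketch, not part of the original file: each PAT
 * slot occupies one byte of MSR_IA32_CR_PAT, and only the low 3 bits of
 * that byte carry one of the PAT_* encodings above. This hypothetical
 * helper mirrors the extraction done in __init_cache_modes() below.
 */
static inline unsigned int pat_msr_slot(u64 pat_msr, unsigned int slot)
{
	return (pat_msr >> (slot * 8)) & 7;
}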

#define CM(c) (_PAGE_CACHE_MODE_ ## c)

static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
{
	enum page_cache_mode cache;
	char *cache_mode;

	switch (pat_val) {
	case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
	case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
	case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
	case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
	case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
	case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
	default:           cache = CM(WB);       cache_mode = "WB  "; break;
	}

	memcpy(msg, cache_mode, 4);

	return cache;
}

#undef CM

/*
 * Update the cache mode to pgprot translation tables according to the PAT
 * configuration.
 * Using lower indices is preferred, so we start with the highest index.
 */
static void __init_cache_modes(u64 pat)
{
	enum page_cache_mode cache;
	char pat_msg[33];
	int i;

	pat_msg[32] = 0;
	for (i = 7; i >= 0; i--) {
		cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
					   pat_msg + 4 * i);
		update_cache_mode_entry(i, cache);
	}
	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);

	init_cm_done = true;
}

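/*
 * Editor's note (illustrative, not part of the original file): with the
 * full-PAT layout programmed by pat_init() below, the pr_info() above
 * typically prints:
 *
 *	x86/PAT: Configuration [0-7]: WB  WC  UC- UC  WB  WP  UC- WT
 */
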
#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))

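/*
 * Editor's worked example (not part of the original file): PAT(4, WB)
 * expands to ((u64)6 << 32), i.e. it places the WB encoding (6) into
 * byte 4 (slot 4) of the value written to MSR_IA32_CR_PAT.
 */
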
static void pat_bsp_init(u64 pat)
{
	u64 tmp_pat;

	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		pat_disable("PAT not supported by the CPU.");
		return;
	}

	rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
	if (!tmp_pat) {
		pat_disable("PAT support disabled by the firmware.");
		return;
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
	pat_initialized = true;

	__init_cache_modes(pat);
}

static void pat_ap_init(u64 pat)
{
	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * If this happens we are on a secondary CPU, but switched to
		 * PAT on the boot CPU. We have no way to undo PAT.
		 */
		panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
}

void init_cache_modes(void)
{
	u64 pat = 0;

	if (init_cm_done)
		return;

	if (boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * CPU supports PAT. Set the PAT table to be consistent with
		 * the PAT MSR. This case supports the "nopat" boot option,
		 * and virtual machine environments which support PAT without
		 * MTRRs. Specifically, Xen has a unique setup for the PAT MSR.
		 *
		 * If the PAT MSR returns 0, it is considered invalid and is
		 * emulated as "No PAT".
		 */
		rdmsrl(MSR_IA32_CR_PAT, pat);
	}

	if (!pat) {
		/*
		 * No PAT. Emulate the PAT table that corresponds to the two
		 * cache bits, PWT (Write Through) and PCD (Cache Disable).
		 * This setup is also the same as the BIOS default setup.
		 *
		 * PTE encoding:
		 *
		 *       PCD
		 *       |PWT  PAT
		 *       ||    slot
		 *       00    0    WB : _PAGE_CACHE_MODE_WB
		 *       01    1    WT : _PAGE_CACHE_MODE_WT
		 *       10    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *       11    3    UC : _PAGE_CACHE_MODE_UC
		 *
		 * NOTE: When WC or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
	}

	__init_cache_modes(pat);
}

/**
 * pat_init - Initialize the PAT MSR and PAT table on the current CPU
 *
 * This function initializes the PAT MSR and PAT table with an OS-defined
 * value to enable additional cache attributes, WC, WT and WP.
 *
 * This function must be called on all CPUs using the specific sequence of
 * operations defined in the Intel SDM. mtrr_rendezvous_handler() provides
 * this procedure for PAT.
 */
void pat_init(void)
{
	u64 pat;
	struct cpuinfo_x86 *c = &boot_cpu_data;

#ifndef CONFIG_X86_PAT
	pr_info_once("x86/PAT: PAT support disabled because CONFIG_X86_PAT is disabled in the kernel.\n");
#endif

	if (pat_disabled)
		return;

	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
	     ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
		/*
		 * PAT support with the lower four entries. Intel Pentium 2,
		 * 3, M, and 4 are affected by PAT errata, which makes the
		 * upper four entries unusable. To be on the safe side, we
		 * don't use those.
		 *
		 *  PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *      PAT bit unused
		 *
		 * NOTE: When WT or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
	} else {
		/*
		 * Full PAT support. We put WT in slot 7 to improve
		 * robustness in the presence of errata that might cause
		 * the high PAT bit to be ignored. This way, a buggy slot 7
		 * access will hit slot 3, and slot 3 is UC, so at worst
		 * we lose performance without causing a correctness issue.
		 * Pentium 4 erratum N46 is an example of such an erratum,
		 * although we try not to use PAT at all on affected CPUs.
		 *
		 *  PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *      100    4    WB : Reserved
		 *      101    5    WP : _PAGE_CACHE_MODE_WP
		 *      110    6    UC-: Reserved
		 *      111    7    WT : _PAGE_CACHE_MODE_WT
		 *
		 * The reserved slots are unused, but mapped to their
		 * corresponding types in the presence of PAT errata.
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WP) | PAT(6, UC_MINUS) | PAT(7, WT);
	}

	if (!boot_cpu_done) {
		pat_bsp_init(pat);
		boot_cpu_done = true;
	} else {
		pat_ap_init(pat);
	}
}

#undef PAT

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Does the intersection of the PAT memory type and the MTRR memory type
 * and returns the resulting memory type as PAT understands it.
 * (The type in pat and mtrr will not have the same value.)
 * The intersection is based on the "Effective Memory Type" tables in
 * the IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
				     enum page_cache_mode req_type)
{
	/*
	 * Look for the MTRR hint to get the effective type in case where
	 * the PAT request is for WB.
	 */
	if (req_type == _PAGE_CACHE_MODE_WB) {
		u8 mtrr_type, uniform;

		mtrr_type = mtrr_type_lookup(start, end, &uniform);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_MODE_UC_MINUS;

		return _PAGE_CACHE_MODE_WB;
	}

	return req_type;
}

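/*
 * Editor's note (illustrative, not part of the original file): the net
 * effect of pat_x_mtrr_type() is a small truth table:
 *
 *	req_type   MTRR type       result
 *	--------   -------------   --------
 *	WB         WRBACK          WB
 *	WB         anything else   UC-
 *	non-WB     (ignored)       req_type unchanged
 */
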
struct pagerange_state {
	unsigned long		cur_pfn;
	int			ram;
	int			not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram	|= initial_pfn > state->cur_pfn;
	state->ram	|= total_nr_pages > 0;
	state->cur_pfn	 = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}

static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, the physical address range in the legacy ISA
	 * region is tracked as non-RAM. This will allow users of
	 * /dev/mem to map portions of the legacy ISA region, even when
	 * some of those portions are listed (or not even listed) with
	 * different e820 types (RAM/reserved/..).
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
					    &state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}

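/*
 * Editor's note (illustrative, not part of the original file): the return
 * convention of pat_pagerange_is_ram() is three-valued:
 *
 *	 1  - the whole range is RAM
 *	 0  - the whole range is non-RAM (or in the low ISA region)
 *	-1  - the range mixes RAM and non-RAM pages; callers treat this
 *	      as an error (-EINVAL)
 */
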
/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * type. The page flags are limited to four types, WB (default), WC, WT and
 * UC-. A WP request fails with -EINVAL, and UC gets redirected to UC-.
 * Setting a new memory type is only allowed for a page mapped with the
 * default WB type.
 *
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end,
				  enum page_cache_mode req_type,
				  enum page_cache_mode *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_MODE_WP) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_UC_MINUS;
		return -EINVAL;
	}

	if (req_type == _PAGE_CACHE_MODE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_MODE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		enum page_cache_mode type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != _PAGE_CACHE_MODE_WB) {
			pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
				start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, _PAGE_CACHE_MODE_WB);
	}
	return 0;
}

static u64 sanitize_phys(u64 address)
{
	/*
	 * When changing the memtype for pages containing poison allow
	 * for a "decoy" virtual address (bit 63 clear) passed to
	 * set_memory_X(). __pa() on a "decoy" address results in a
	 * physical address with bit 63 set.
	 *
	 * Decoy addresses are not present for 32-bit builds, see
	 * set_mce_nospec().
	 */
	if (IS_ENABLED(CONFIG_X86_64))
		return address & __PHYSICAL_MASK;
	return address;
}

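/*
 * Editor's worked example (illustrative, not part of the original file):
 * __pa() on a decoy address produces a physical address with bit 63 set,
 * e.g. 0x8000000012345000. Masking with __PHYSICAL_MASK drops the bits
 * above the supported physical address width, recovering 0x12345000, so
 * the memtype tracking below always sees the real physical range.
 */
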
/*
 * req_type typically has one of:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 * - _PAGE_CACHE_MODE_WT
 *
 * If new_type is NULL, the function will return an error if it cannot
 * reserve the region with req_type. If new_type is non-NULL, the function
 * will return the available type in new_type in case of no error. In case
 * of any error it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
		    enum page_cache_mode *new_type)
{
	struct memtype *new;
	enum page_cache_mode actual_type;
	int is_range_ram;
	int err = 0;

	start = sanitize_phys(start);
	end = sanitize_phys(end);
	if (start >= end) {
		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
		     start, end - 1, cattr_name(req_type));
		return -EINVAL;
	}

	if (!pat_enabled()) {
		/* This is identical to page table setting without PAT */
		if (new_type)
			*new_type = req_type;
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = reserve_ram_pages_type(start, end, req_type, new_type);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	spin_lock(&memtype_lock);

	err = memtype_check_insert(new, new_type);
	if (err) {
		pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
			start, end - 1,
			cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

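/*
 * Editor's usage sketch (hypothetical caller, not part of the original
 * file): a typical in-kernel user reserves a range, checks what PAT
 * actually granted, and frees the range again on teardown.
 */
static int __maybe_unused example_reserve_wc(u64 base, u64 size)
{
	enum page_cache_mode got;
	int ret;

	ret = reserve_memtype(base, base + size, _PAGE_CACHE_MODE_WC, &got);
	if (ret)
		return ret;	/* range conflicts with an existing memtype */

	if (got != _PAGE_CACHE_MODE_WC) {
		/* PAT granted a compatible type instead of WC: */
		pr_info("example: got %s instead of WC\n", cattr_name(got));
		free_memtype(base, base + size);
		return -EBUSY;
	}

	return 0;
}
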
int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled())
		return 0;

	start = sanitize_phys(start);
	end = sanitize_phys(end);

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = free_ram_pages_type(start, end);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (IS_ERR(entry)) {
		pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_WT.
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
{
	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;

		page = pfn_to_page(paddr >> PAGE_SHIFT);
		return get_page_memtype(page);
	}

	spin_lock(&memtype_lock);

	entry = memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_MODE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

/**
 * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type
 * of @pfn cannot be overridden by UC MTRR memory type.
 *
 * Only to be called when PAT is enabled.
 *
 * Returns true, if the PAT memory type of @pfn is UC, UC-, or WC.
 * Returns false in other cases.
 */
bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn)
{
	enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn));

	return cm == _PAGE_CACHE_MODE_UC ||
	       cm == _PAGE_CACHE_MODE_UC_MINUS ||
	       cm == _PAGE_CACHE_MODE_WC;
}
EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr);

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			enum page_cache_mode *type)
{
	resource_size_t size = end - start;
	enum page_cache_mode req_type = *type;
	enum page_cache_mode new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{
	enum page_cache_mode type = _PAGE_CACHE_MODE_WC;

	return io_reserve_memtype(start, start + size, &type);
}
EXPORT_SYMBOL(arch_io_reserve_memtype_wc);

void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
{
	io_free_memtype(start, start + size);
}
EXPORT_SYMBOL(arch_io_free_memtype_wc);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
		vma_prot = pgprot_decrypted(vma_prot);

	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled())
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		pcm = _PAGE_CACHE_MODE_UC_MINUS;

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     cachemode2protval(pcm));
	return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size,
			    enum page_cache_mode pcm)
{
	unsigned long id_sz;

	if (base > __pa(high_memory-1))
		return 0;

	/*
	 * Some areas in the middle of the kernel identity range
	 * are not mapped, for example the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))
		return 0;

	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base : size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(pcm),
			base, (unsigned long long)(base + size-1));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype(),
 * this function also keeps the identity mapping (if any) in sync with
 * the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
	enum page_cache_mode pcm = want_pcm;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of the number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of the first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled())
			return 0;

		pcm = lookup_memtype(paddr);
		if (want_pcm != pcm) {
			pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					     (~_PAGE_CACHE_MASK)) |
					     cachemode2protval(pcm));
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
	if (ret)
		return ret;

	if (pcm != want_pcm) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
			free_memtype(paddr, paddr + size);
			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
			       current->comm, current->pid,
			       cattr_name(want_pcm),
			       (unsigned long long)paddr,
			       (unsigned long long)(paddr + size - 1),
			       cattr_name(pcm));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested
		 * in the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     cachemode2protval(pcm));
	}

	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_copy is called when a vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the
 * prot from the pte and reserve the entire vma range with a single
 * reserve_pfn_range call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * prot is passed in as a parameter for the new mapping. If the vma has
 * a linear pfn mapping for the entire range, or no vma is provided,
 * reserve the entire pfn + size range with a single reserve_pfn_range
 * call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	enum page_cache_mode pcm;

	/* reserve the whole chunk starting from paddr */
	if (!vma || (addr == vma->vm_start
				&& size == (vma->vm_end - vma->vm_start))) {
		int ret;

		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (ret == 0 && vma)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	if (!pat_enabled())
		return 0;

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	pcm = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (pcm != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
{
	enum page_cache_mode pcm;

	if (!pat_enabled())
		return;

	/* Set prot based on lookup */
	pcm = lookup_memtype(pfn_t_to_phys(pfn));
	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn, size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;
	unsigned long prot;

	if (vma && !(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return;
		}

		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	if (vma)
		vma->vm_flags &= ~VM_PAT;
}

/*
 * untrack_pfn_moved is called while mremapping a pfnmap for a new region,
 * with the old vma after its pfnmap page table has been removed. The new
 * vma has a new pfnmap to the same pfn & cache type with VM_PAT set.
 */
void untrack_pfn_moved(struct vm_area_struct *vma)
{
	vma->vm_flags &= ~VM_PAT;
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
			cachemode2protval(_PAGE_CACHE_MODE_WC));
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

pgprot_t pgprot_writethrough(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
			cachemode2protval(_PAGE_CACHE_MODE_WT));
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);

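/*
 * Editor's usage sketch (hypothetical, not part of this file): a driver
 * mmap handler typically combines these helpers with remap_pfn_range(),
 * e.g.:
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */
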
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/*
 * We are allocating a temporary printout-entry to be passed
 * between seq_start()/next() and seq_show():
 */
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	/* Free it on error: */
	if (ret) {
		kfree(print_entry);
		return NULL;
	}

	return print_entry;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_puts(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "PAT: [mem 0x%016Lx-0x%016Lx] %s\n",
			print_entry->start,
			print_entry->end,
			cattr_name(print_entry->type));

	kfree(print_entry);

	return 0;
}

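/*
 * Editor's note (illustrative, not part of the original file): with the
 * fixed-width format above, reading the debugfs file produces lines like:
 *
 *	PAT memtype list:
 *	PAT: [mem 0x00000000d0000000-0x00000000d0080000] write-combining
 *	PAT: [mem 0x00000000fed00000-0x00000000fed01000] uncached-minus
 *
 * (cattr_name() supplies the human-readable type names.)
 */
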
static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	if (pat_enabled()) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}
late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */