x86/mm/pat: Harmonize 'struct memtype *' local variable and function parameter use
arch/x86/mm/pat.c
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
2e5d9c85 2/*
aee7f913 3 * Page Attribute Table (PAT) support: handle memory caching attributes in page tables.
2e5d9c85 4 *
5 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
6 * Suresh B Siddha <suresh.b.siddha@intel.com>
7 *
8 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
9 *
10 * Basic principles:
11 *
12 * PAT is a CPU feature supported by all modern x86 CPUs, to allow the firmware and
13 * the kernel to set one of a handful of 'caching type' attributes for physical
14 * memory ranges: uncached, write-combining, write-through, write-protected,
15 * and the most commonly used and default attribute: write-back caching.
16 *
17 * PAT support supersedes and augments MTRR support in a compatible fashion: MTRR is
18 * a hardware interface to enumerate a limited number of physical memory ranges
19 * and set their caching attributes explicitly, programmed into the CPU via MSRs.
20 * Even modern CPUs have MTRRs enabled - but these are typically not touched
21 * by the kernel or by user-space (such as the X server); we rely on PAT for any
22 * additional cache attribute logic.
23 *
24 * PAT doesn't work via explicit memory ranges, but uses page table entries to add
25 * cache attribute information to the mapped memory range: there are 3 bits used,
26 * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT), with the 8 possible values mapped by the
27 * CPU to actual cache attributes via an MSR loaded into the CPU (MSR_IA32_CR_PAT).
28 *
29 * ( There's a metric ton of finer details, such as compatibility with CPU quirks
30 * that only support 4 types of PAT entries, and interaction with MTRRs, see
31 * below for details. )
2e5d9c85 32 */
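/*
 * Illustration only (not used by the code below): for a regular 4K PTE the
 * three bits select a PAT slot, and the PAT MSR supplies the caching type
 * for that slot:
 *
 *	slot = (pte & _PAGE_PAT ? 4 : 0) |
 *	       (pte & _PAGE_PCD ? 2 : 0) |
 *	       (pte & _PAGE_PWT ? 1 : 0);
 *	type = (pat_msr >> (slot * 8)) & 0x7;	/* pat_msr = MSR_IA32_CR_PAT */
 */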
33
ad2cde16 34#include <linux/seq_file.h>
57c8a661 35#include <linux/memblock.h>
ad2cde16 36#include <linux/debugfs.h>
9de94dbb 37#include <linux/ioport.h>
2e5d9c85 38#include <linux/kernel.h>
f25748e3 39#include <linux/pfn_t.h>
5a0e3ad6 40#include <linux/slab.h>
ad2cde16 41#include <linux/mm.h>
2e5d9c85 42#include <linux/fs.h>
335ef896 43#include <linux/rbtree.h>
2e5d9c85 44
ad2cde16 45#include <asm/cacheflush.h>
2e5d9c85 46#include <asm/processor.h>
ad2cde16 47#include <asm/tlbflush.h>
fd12a0d6 48#include <asm/x86_init.h>
2e5d9c85 49#include <asm/pgtable.h>
2e5d9c85 50#include <asm/fcntl.h>
66441bd3 51#include <asm/e820/api.h>
2e5d9c85 52#include <asm/mtrr.h>
53#include <asm/page.h>
54#include <asm/msr.h>
55#include <asm/pat.h>
e7f260a2 56#include <asm/io.h>
2e5d9c85 57
be5a0c12 58#include "pat_internal.h"
bd809af1 59#include "mm_internal.h"
be5a0c12 60
61#undef pr_fmt
62#define pr_fmt(fmt) "" fmt
63
64static bool __read_mostly boot_cpu_done;
65static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
66static bool __read_mostly pat_initialized;
67static bool __read_mostly init_cm_done;
2e5d9c85 68
69/*
70 * PAT support is enabled by default, but can be disabled for
71 * various user-requested or hardware-forced reasons:
72 */
73void pat_disable(const char *msg_reason)
2e5d9c85 74{
99c13b8c 75 if (pat_disabled)
76 return;
77
78 if (boot_cpu_done) {
79 WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
80 return;
81 }
82
99c13b8c 83 pat_disabled = true;
5557e831 84 pr_info("x86/PAT: %s\n", msg_reason);
2e5d9c85 85}
2e5d9c85 86
be524fb9 87static int __init nopat(char *str)
2e5d9c85 88{
5557e831 89 pat_disable("PAT support disabled via boot option.");
2e5d9c85 90 return 0;
91}
8d4a4300 92early_param("nopat", nopat);
93
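/*
 * Note: pat_enabled() reports whether PAT was actually initialized on the
 * boot CPU, i.e. it stays false when PAT is compiled in but disabled at
 * runtime (via "nopat" or missing CPU/firmware support).
 */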
94bool pat_enabled(void)
75a04811 95{
99c13b8c 96 return pat_initialized;
75a04811 97}
fbe7193a 98EXPORT_SYMBOL_GPL(pat_enabled);
77b52b4c 99
be5a0c12 100int pat_debug_enable;
ad2cde16 101
102static int __init pat_debug_setup(char *str)
103{
be5a0c12 104 pat_debug_enable = 1;
105 return 0;
106}
107__setup("debugpat", pat_debug_setup);
108
109#ifdef CONFIG_X86_PAT
110/*
111 * X86 PAT uses the page flags arch_1 and uncached together to keep track of
112 * the memory type of pages that have a backing struct page.
113 *
114 * X86 PAT supports 4 different memory types:
115 * - _PAGE_CACHE_MODE_WB
116 * - _PAGE_CACHE_MODE_WC
117 * - _PAGE_CACHE_MODE_UC_MINUS
118 * - _PAGE_CACHE_MODE_WT
119 *
120 * _PAGE_CACHE_MODE_WB is the default type.
121 */
122
35a5a104 123#define _PGMT_WB 0
124#define _PGMT_WC (1UL << PG_arch_1)
125#define _PGMT_UC_MINUS (1UL << PG_uncached)
35a5a104 126#define _PGMT_WT (1UL << PG_uncached | 1UL << PG_arch_1)
127#define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1)
128#define _PGMT_CLEAR_MASK (~_PGMT_MASK)
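/* Example of the encoding above: a page with only PG_arch_1 set is tracked as WC. */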
129
130static inline enum page_cache_mode get_page_memtype(struct page *pg)
131{
132 unsigned long pg_flags = pg->flags & _PGMT_MASK;
133
134 if (pg_flags == _PGMT_WB)
135 return _PAGE_CACHE_MODE_WB;
136 else if (pg_flags == _PGMT_WC)
137 return _PAGE_CACHE_MODE_WC;
138 else if (pg_flags == _PGMT_UC_MINUS)
139 return _PAGE_CACHE_MODE_UC_MINUS;
140 else
35a5a104 141 return _PAGE_CACHE_MODE_WT;
142}
143
144static inline void set_page_memtype(struct page *pg,
145 enum page_cache_mode memtype)
146{
147 unsigned long memtype_flags;
148 unsigned long old_flags;
149 unsigned long new_flags;
150
151 switch (memtype) {
152 case _PAGE_CACHE_MODE_WC:
153 memtype_flags = _PGMT_WC;
154 break;
155 case _PAGE_CACHE_MODE_UC_MINUS:
156 memtype_flags = _PGMT_UC_MINUS;
157 break;
158 case _PAGE_CACHE_MODE_WT:
159 memtype_flags = _PGMT_WT;
0dbcae88 160 break;
35a5a104 161 case _PAGE_CACHE_MODE_WB:
0dbcae88 162 default:
35a5a104 163 memtype_flags = _PGMT_WB;
164 break;
165 }
166
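	/*
	 * page->flags is updated concurrently by other page-flag operations,
	 * so replace only the two memtype bits via an atomic cmpxchg() loop.
	 */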
167 do {
168 old_flags = pg->flags;
169 new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
170 } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
171}
172#else
173static inline enum page_cache_mode get_page_memtype(struct page *pg)
174{
175 return -1;
176}
177static inline void set_page_memtype(struct page *pg,
178 enum page_cache_mode memtype)
179{
180}
181#endif
182
2e5d9c85 183enum {
184 PAT_UC = 0, /* uncached */
185 PAT_WC = 1, /* Write combining */
186 PAT_WT = 4, /* Write Through */
187 PAT_WP = 5, /* Write Protected */
188 PAT_WB = 6, /* Write Back (default) */
6a6256f9 189 PAT_UC_MINUS = 7, /* UC, but can be overridden by MTRR */
2e5d9c85 190};
191
192#define CM(c) (_PAGE_CACHE_MODE_ ## c)
193
194static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
195{
196 enum page_cache_mode cache;
197 char *cache_mode;
198
199 switch (pat_val) {
200 case PAT_UC: cache = CM(UC); cache_mode = "UC "; break;
201 case PAT_WC: cache = CM(WC); cache_mode = "WC "; break;
202 case PAT_WT: cache = CM(WT); cache_mode = "WT "; break;
203 case PAT_WP: cache = CM(WP); cache_mode = "WP "; break;
204 case PAT_WB: cache = CM(WB); cache_mode = "WB "; break;
205 case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
206 default: cache = CM(WB); cache_mode = "WB "; break;
207 }
208
209 memcpy(msg, cache_mode, 4);
210
211 return cache;
212}
213
214#undef CM
215
216/*
217 * Update the cache mode to pgprot translation tables according to PAT
218 * configuration.
219 * Using lower indices is preferred, so we start with the highest index.
220 */
88ba2811 221static void __init_cache_modes(u64 pat)
bd809af1 222{
223 enum page_cache_mode cache;
224 char pat_msg[33];
9cd25aac 225 int i;
bd809af1 226
227 pat_msg[32] = 0;
228 for (i = 7; i >= 0; i--) {
229 cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
230 pat_msg + 4 * i);
231 update_cache_mode_entry(i, cache);
232 }
9e76561f 233 pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
234
235 init_cm_done = true;
236}
237
cd7a4e93 238#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
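/*
 * For example, PAT(4, WB) | PAT(7, WT) places the WB encoding (6) into byte 4
 * and the WT encoding (4) into byte 7 of the value written to the PAT MSR.
 */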
2e5d9c85 239
9dac6290 240static void pat_bsp_init(u64 pat)
2e5d9c85 241{
242 u64 tmp_pat;
243
d63dcf49 244 if (!boot_cpu_has(X86_FEATURE_PAT)) {
5557e831 245 pat_disable("PAT not supported by the CPU.");
246 return;
247 }
2e5d9c85 248
249 rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
250 if (!tmp_pat) {
5557e831 251 pat_disable("PAT support disabled by the firmware.");
2e5d9c85 252 return;
253 }
254
255 wrmsrl(MSR_IA32_CR_PAT, pat);
99c13b8c 256 pat_initialized = true;
2e5d9c85 257
02f037d6 258 __init_cache_modes(pat);
259}
260
261static void pat_ap_init(u64 pat)
262{
c08d5174 263 if (!boot_cpu_has(X86_FEATURE_PAT)) {
264 /*
265 * If this happens we are on a secondary CPU, but switched to
266 * PAT on the boot CPU. We have no way to undo PAT.
267 */
268 panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
8d4a4300 269 }
2e5d9c85 270
271 wrmsrl(MSR_IA32_CR_PAT, pat);
272}
273
99c13b8c 274void init_cache_modes(void)
9dac6290 275{
02f037d6 276 u64 pat = 0;
9dac6290 277
278 if (init_cm_done)
279 return;
280
281 if (boot_cpu_has(X86_FEATURE_PAT)) {
282 /*
283 * CPU supports PAT. Set PAT table to be consistent with
284 * PAT MSR. This case supports "nopat" boot option, and
285 * virtual machine environments which support PAT without
286 * MTRRs. Specifically, Xen has a unique setup for the PAT MSR.
287 *
288 * If the PAT MSR reads back as 0, it is considered invalid and is
289 * emulated as "No PAT".
290 */
291 rdmsrl(MSR_IA32_CR_PAT, pat);
292 }
293
294 if (!pat) {
295 /*
296 * No PAT. Emulate the PAT table that corresponds to the two
297 * cache bits, PWT (Write Through) and PCD (Cache Disable).
298 * This setup is also the same as the BIOS default setup.
9cd25aac 299 *
d79a40ca 300 * PTE encoding:
301 *
302 * PCD
303 * |PWT PAT
304 * || slot
305 * 00 0 WB : _PAGE_CACHE_MODE_WB
306 * 01 1 WT : _PAGE_CACHE_MODE_WT
307 * 10 2 UC-: _PAGE_CACHE_MODE_UC_MINUS
308 * 11 3 UC : _PAGE_CACHE_MODE_UC
309 *
310 * NOTE: When WC or WP is used, it is redirected to UC- per
311 * the default setup in __cachemode2pte_tbl[].
312 */
313 pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
314 PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
315 }
316
317 __init_cache_modes(pat);
318}
319
320/**
5557e831 321 * pat_init - Initialize the PAT MSR and PAT table on the current CPU
322 *
323 * This function initializes PAT MSR and PAT table with an OS-defined value
aac7b79e 324 * to enable additional cache attributes, WC, WT and WP.
325 *
326 * This function must be called on all CPUs using the specific sequence of
327 * operations defined in Intel SDM. mtrr_rendezvous_handler() provides this
328 * procedure for PAT.
329 */
330void pat_init(void)
331{
332 u64 pat;
333 struct cpuinfo_x86 *c = &boot_cpu_data;
334
335#ifndef CONFIG_X86_PAT
336 pr_info_once("x86/PAT: PAT support disabled because CONFIG_X86_PAT is disabled in the kernel.\n");
337#endif
338
99c13b8c 339 if (pat_disabled)
02f037d6 340 return;
d79a40ca 341
342 if ((c->x86_vendor == X86_VENDOR_INTEL) &&
343 (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
344 ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
9cd25aac 345 /*
346 * PAT support with the lower four entries. Intel Pentium 2,
347 * 3, M, and 4 are affected by PAT errata, which makes the
348 * upper four entries unusable. To be on the safe side, we don't
349 * use those.
350 *
351 * PTE encoding:
352 * PAT
353 * |PCD
354 * ||PWT PAT
355 * ||| slot
356 * 000 0 WB : _PAGE_CACHE_MODE_WB
357 * 001 1 WC : _PAGE_CACHE_MODE_WC
358 * 010 2 UC-: _PAGE_CACHE_MODE_UC_MINUS
359 * 011 3 UC : _PAGE_CACHE_MODE_UC
9cd25aac 360 * PAT bit unused
361 *
362 * NOTE: When WT or WP is used, it is redirected to UC- per
363 * the default setup in __cachemode2pte_tbl[].
364 */
365 pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
366 PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
367 } else {
368 /*
369 * Full PAT support. We put WT in slot 7 to improve
370 * robustness in the presence of errata that might cause
371 * the high PAT bit to be ignored. This way, a buggy slot 7
372 * access will hit slot 3, and slot 3 is UC, so at worst
373 * we lose performance without causing a correctness issue.
374 * Pentium 4 erratum N46 is an example for such an erratum,
375 * although we try not to use PAT at all on affected CPUs.
376 *
377 * PTE encoding:
378 * PAT
379 * |PCD
380 * ||PWT PAT
381 * ||| slot
382 * 000 0 WB : _PAGE_CACHE_MODE_WB
383 * 001 1 WC : _PAGE_CACHE_MODE_WC
384 * 010 2 UC-: _PAGE_CACHE_MODE_UC_MINUS
385 * 011 3 UC : _PAGE_CACHE_MODE_UC
386 * 100 4 WB : Reserved
aac7b79e 387 * 101 5 WP : _PAGE_CACHE_MODE_WP
388 * 110 6 UC-: Reserved
389 * 111 7 WT : _PAGE_CACHE_MODE_WT
390 *
391 * The reserved slots are unused, but mapped to their
392 * corresponding types in the presence of PAT errata.
393 */
394 pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
aac7b79e 395 PAT(4, WB) | PAT(5, WP) | PAT(6, UC_MINUS) | PAT(7, WT);
9cd25aac 396 }
2e5d9c85 397
398 if (!boot_cpu_done) {
399 pat_bsp_init(pat);
400 boot_cpu_done = true;
401 } else {
402 pat_ap_init(pat);
9d34cfdf 403 }
2e5d9c85 404}
405
406#undef PAT
407
9e41a49a 408static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */
335ef896 409
2e5d9c85 410/*
411 * Intersect the PAT memory type with the MTRR memory type and return
412 * the resulting memory type as PAT understands it.
413 * (The PAT and MTRR types do not use the same numeric values.)
414 * The intersection is based on the "Effective Memory Type" tables in the
415 * IA-32 SDM, vol 3a.
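 *
 * For example, a WB request over a range that an MTRR marks UC or WC is
 * returned as UC- here, so the effective type follows the SDM tables.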
416 */
417static unsigned long pat_x_mtrr_type(u64 start, u64 end,
418 enum page_cache_mode req_type)
2e5d9c85 419{
420 /*
421 * Look for MTRR hint to get the effective type in case where PAT
422 * request is for WB.
423 */
e00c8cc9 424 if (req_type == _PAGE_CACHE_MODE_WB) {
b73522e0 425 u8 mtrr_type, uniform;
dd0c7c49 426
b73522e0 427 mtrr_type = mtrr_type_lookup(start, end, &uniform);
b6ff32d9 428 if (mtrr_type != MTRR_TYPE_WRBACK)
e00c8cc9 429 return _PAGE_CACHE_MODE_UC_MINUS;
b6ff32d9 430
e00c8cc9 431 return _PAGE_CACHE_MODE_WB;
432 }
433
434 return req_type;
2e5d9c85 435}
436
437struct pagerange_state {
438 unsigned long cur_pfn;
439 int ram;
440 int not_ram;
441};
442
443static int
444pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
445{
446 struct pagerange_state *state = arg;
447
448 state->not_ram |= initial_pfn > state->cur_pfn;
449 state->ram |= total_nr_pages > 0;
450 state->cur_pfn = initial_pfn + total_nr_pages;
451
452 return state->ram && state->not_ram;
453}
454
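/*
 * Returns 1 if the whole range is RAM (tracked via struct page flags),
 * 0 if none of it is, and -1 if the range mixes RAM and non-RAM pages.
 */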
3709c857 455static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
be03d9e8 456{
457 int ret = 0;
458 unsigned long start_pfn = start >> PAGE_SHIFT;
459 unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
460 struct pagerange_state state = {start_pfn, 0, 0};
461
462 /*
463 * For legacy reasons, the physical address range in the legacy ISA
464 * region is tracked as non-RAM. This allows users of
465 * /dev/mem to map portions of the legacy ISA region, even when
466 * some of those portions are listed (or not even listed) with
467 * different e820 types (RAM/reserved/..).
468 */
469 if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
470 start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;
471
472 if (start_pfn < end_pfn) {
473 ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
474 &state, pagerange_is_ram_callback);
475 }
476
fa83523f 477 return (ret > 0) ? -1 : (state.ram ? 1 : 0);
478}
479
9542ada8 480/*
f5841740 481 * For RAM pages, we use page flags to mark the pages with appropriate type.
482 * The page flags are limited to four types, WB (default), WC, WT and UC-.
483 * WP request fails with -EINVAL, and UC gets redirected to UC-. Setting
484 * a new memory type is only allowed for a page mapped with the default WB
485 * type.
486 *
487 * Here we do two passes:
488 * - Find the memtype of all the pages in the range, look for any conflicts.
489 * - In case of no conflicts, set the new memtype for pages in the range.
9542ada8 490 */
491static int reserve_ram_pages_type(u64 start, u64 end,
492 enum page_cache_mode req_type,
493 enum page_cache_mode *new_type)
494{
495 struct page *page;
496 u64 pfn;
497
35a5a104 498 if (req_type == _PAGE_CACHE_MODE_WP) {
499 if (new_type)
500 *new_type = _PAGE_CACHE_MODE_UC_MINUS;
501 return -EINVAL;
502 }
503
e00c8cc9 504 if (req_type == _PAGE_CACHE_MODE_UC) {
505 /* We do not support strong UC */
506 WARN_ON_ONCE(1);
e00c8cc9 507 req_type = _PAGE_CACHE_MODE_UC_MINUS;
f5841740 508 }
509
510 for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
e00c8cc9 511 enum page_cache_mode type;
9542ada8 512
513 page = pfn_to_page(pfn);
514 type = get_page_memtype(page);
35a5a104 515 if (type != _PAGE_CACHE_MODE_WB) {
9e76561f 516 pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
365811d6 517 start, end - 1, type, req_type);
518 if (new_type)
519 *new_type = type;
520
521 return -EBUSY;
522 }
9542ada8 523 }
9542ada8 524
525 if (new_type)
526 *new_type = req_type;
527
528 for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
9542ada8 529 page = pfn_to_page(pfn);
f5841740 530 set_page_memtype(page, req_type);
9542ada8 531 }
f5841740 532 return 0;
533}
534
535static int free_ram_pages_type(u64 start, u64 end)
536{
537 struct page *page;
f5841740 538 u64 pfn;
539
540 for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
541 page = pfn_to_page(pfn);
35a5a104 542 set_page_memtype(page, _PAGE_CACHE_MODE_WB);
543 }
544 return 0;
545}
546
547static u64 sanitize_phys(u64 address)
548{
549 /*
550 * When changing the memtype for pages containing poison allow
551 * for a "decoy" virtual address (bit 63 clear) passed to
552 * set_memory_X(). __pa() on a "decoy" address results in a
553 * physical address with bit 63 set.
554 *
555 * Decoy addresses are not present for 32-bit builds, see
556 * set_mce_nospec().
510ee090 557 */
558 if (IS_ENABLED(CONFIG_X86_64))
559 return address & __PHYSICAL_MASK;
560 return address;
561}
562
e7f260a2 563/*
564 * req_type typically has one of the following values:
565 * - _PAGE_CACHE_MODE_WB
566 * - _PAGE_CACHE_MODE_WC
567 * - _PAGE_CACHE_MODE_UC_MINUS
568 * - _PAGE_CACHE_MODE_UC
0d69bdff 569 * - _PAGE_CACHE_MODE_WT
e7f260a2 570 *
571 * If new_type is NULL, the function will return an error if it cannot reserve the
572 * region with req_type. If new_type is non-NULL, the function will return the
573 * available type in new_type in case of no error. In case of any error
e7f260a2 574 * it will return a negative return value.
575 */
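/*
 * Minimal usage sketch (illustrative only, not taken from a specific caller):
 *
 *	enum page_cache_mode new_pcm;
 *
 *	ret = reserve_memtype(paddr, paddr + size, _PAGE_CACHE_MODE_WC, &new_pcm);
 *	if (!ret) {
 *		... map the range using new_pcm ...
 *		free_memtype(paddr, paddr + size);
 *	}
 */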
576int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
577 enum page_cache_mode *new_type)
2e5d9c85 578{
baf65855 579 struct memtype *entry_new;
e00c8cc9 580 enum page_cache_mode actual_type;
9542ada8 581 int is_range_ram;
ad2cde16 582 int err = 0;
2e5d9c85 583
584 start = sanitize_phys(start);
585 end = sanitize_phys(end);
586 if (start >= end) {
587 WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
588 start, end - 1, cattr_name(req_type));
589 return -EINVAL;
590 }
69e26be9 591
cb32edf6 592 if (!pat_enabled()) {
e7f260a2 593 /* This is identical to page table setting without PAT */
594 if (new_type)
595 *new_type = req_type;
2e5d9c85 596 return 0;
597 }
598
599 /* Low ISA region is always mapped WB in page table. No need to track */
8a271389 600 if (x86_platform.is_untracked_pat_range(start, end)) {
ac97991e 601 if (new_type)
e00c8cc9 602 *new_type = _PAGE_CACHE_MODE_WB;
2e5d9c85 603 return 0;
604 }
605
606 /*
607 * Call mtrr_lookup to get the type hint. This is an
608 * optimization for /dev/mem mmap'ers into WB memory (BIOS
609 * tools and ACPI tools). Use WB request for WB memory and use
610 * UC_MINUS otherwise.
611 */
e00c8cc9 612 actual_type = pat_x_mtrr_type(start, end, req_type);
2e5d9c85 613
614 if (new_type)
615 *new_type = actual_type;
616
be03d9e8 617 is_range_ram = pat_pagerange_is_ram(start, end);
618 if (is_range_ram == 1) {
619
f5841740 620 err = reserve_ram_pages_type(start, end, req_type, new_type);
621
622 return err;
623 } else if (is_range_ram < 0) {
9542ada8 624 return -EINVAL;
f5841740 625 }
9542ada8 626
627 entry_new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
628 if (!entry_new)
2e5d9c85 629 return -ENOMEM;
630
631 entry_new->start = start;
632 entry_new->end = end;
633 entry_new->type = actual_type;
2e5d9c85 634
2e5d9c85 635 spin_lock(&memtype_lock);
636
baf65855 637 err = memtype_check_insert(entry_new, new_type);
2e5d9c85 638 if (err) {
639 pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
640 start, end - 1,
641 cattr_name(entry_new->type), cattr_name(req_type));
642 kfree(entry_new);
2e5d9c85 643 spin_unlock(&memtype_lock);
ad2cde16 644
2e5d9c85 645 return err;
646 }
647
2e5d9c85 648 spin_unlock(&memtype_lock);
3e9c83b3 649
365811d6 650 dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
baf65855 651 start, end - 1, cattr_name(entry_new->type), cattr_name(req_type),
652 new_type ? cattr_name(*new_type) : "-");
653
2e5d9c85 654 return err;
655}
656
657int free_memtype(u64 start, u64 end)
658{
9542ada8 659 int is_range_ram;
baf65855 660 struct memtype *entry_old;
2e5d9c85 661
cb32edf6 662 if (!pat_enabled())
2e5d9c85 663 return 0;
2e5d9c85 664
665 start = sanitize_phys(start);
666 end = sanitize_phys(end);
667
2e5d9c85 668 /* Low ISA region is always mapped WB. No need to track */
8a271389 669 if (x86_platform.is_untracked_pat_range(start, end))
2e5d9c85 670 return 0;
2e5d9c85 671
be03d9e8 672 is_range_ram = pat_pagerange_is_ram(start, end);
673 if (is_range_ram == 1)
674 return free_ram_pages_type(start, end);
675 if (is_range_ram < 0)
676 return -EINVAL;
677
2e5d9c85 678 spin_lock(&memtype_lock);
baf65855 679 entry_old = memtype_erase(start, end);
2e5d9c85 680 spin_unlock(&memtype_lock);
681
baf65855 682 if (IS_ERR(entry_old)) {
683 pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
684 current->comm, current->pid, start, end - 1);
20413f27 685 return -EINVAL;
2e5d9c85 686 }
6997ab49 687
baf65855 688 kfree(entry_old);
20413f27 689
365811d6 690 dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);
ad2cde16 691
20413f27 692 return 0;
2e5d9c85 693}
694
f0970c13 695
696/**
697 * lookup_memtype - Looks up the memory type for a physical address
698 * @paddr: physical address whose memory type needs to be looked up
699 *
700 * Only to be called when PAT is enabled
701 *
2a374698 702 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
35a5a104 703 * or _PAGE_CACHE_MODE_WT.
637b86e7 704 */
2a374698 705static enum page_cache_mode lookup_memtype(u64 paddr)
637b86e7 706{
2a374698 707 enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
708 struct memtype *entry;
709
8a271389 710 if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
711 return rettype;
712
713 if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
714 struct page *page;
637b86e7 715
716 page = pfn_to_page(paddr >> PAGE_SHIFT);
717 return get_page_memtype(page);
718 }
719
720 spin_lock(&memtype_lock);
721
511aaca8 722 entry = memtype_lookup(paddr);
723 if (entry != NULL)
724 rettype = entry->type;
725 else
2a374698 726 rettype = _PAGE_CACHE_MODE_UC_MINUS;
727
728 spin_unlock(&memtype_lock);
baf65855 729
730 return rettype;
731}
732
733/**
734 * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type
735 * of @pfn cannot be overridden by UC MTRR memory type.
736 *
737 * Only to be called when PAT is enabled.
738 *
739 * Returns true, if the PAT memory type of @pfn is UC, UC-, or WC.
740 * Returns false in other cases.
741 */
742bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn)
743{
744 enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn));
745
746 return cm == _PAGE_CACHE_MODE_UC ||
747 cm == _PAGE_CACHE_MODE_UC_MINUS ||
748 cm == _PAGE_CACHE_MODE_WC;
749}
750EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr);
751
752/**
753 * io_reserve_memtype - Request a memory type mapping for a region of memory
754 * @start: start (physical address) of the region
755 * @end: end (physical address) of the region
756 * @type: A pointer to the requested memtype. On success, the requested type
757 * or any other compatible type that was available for the region is returned
758 *
759 * On success, returns 0
760 * On failure, returns non-zero
761 */
762int io_reserve_memtype(resource_size_t start, resource_size_t end,
49a3b3cb 763 enum page_cache_mode *type)
9fd126bc 764{
b855192c 765 resource_size_t size = end - start;
766 enum page_cache_mode req_type = *type;
767 enum page_cache_mode new_type;
768 int ret;
769
b855192c 770 WARN_ON_ONCE(iomem_map_sanity_check(start, size));
771
772 ret = reserve_memtype(start, end, req_type, &new_type);
773 if (ret)
774 goto out_err;
775
b855192c 776 if (!is_new_memtype_allowed(start, size, req_type, new_type))
777 goto out_free;
778
b855192c 779 if (kernel_map_sync_memtype(start, size, new_type) < 0)
780 goto out_free;
781
782 *type = new_type;
783 return 0;
784
785out_free:
786 free_memtype(start, end);
787 ret = -EBUSY;
788out_err:
789 return ret;
790}
791
792/**
793 * io_free_memtype - Release a memory type mapping for a region of memory
794 * @start: start (physical address) of the region
795 * @end: end (physical address) of the region
796 */
797void io_free_memtype(resource_size_t start, resource_size_t end)
798{
799 free_memtype(start, end);
800}
801
802int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
803{
804 enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
805
806 return io_reserve_memtype(start, start + size, &type);
807}
808EXPORT_SYMBOL(arch_io_reserve_memtype_wc);
809
810void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
811{
812 io_free_memtype(start, start + size);
813}
814EXPORT_SYMBOL(arch_io_free_memtype_wc);
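/*
 * Typical use (sketch): a driver reserves WC for an MMIO aperture before
 * mapping it, and releases the reservation on teardown (bar_start/bar_size
 * are placeholders):
 *
 *	arch_io_reserve_memtype_wc(bar_start, bar_size);
 *	...
 *	arch_io_free_memtype_wc(bar_start, bar_size);
 */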
815
f0970c13 816pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
817 unsigned long size, pgprot_t vma_prot)
818{
819 if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
820 vma_prot = pgprot_decrypted(vma_prot);
821
f0970c13 822 return vma_prot;
823}
824
d092633b 825#ifdef CONFIG_STRICT_DEVMEM
1f40a8bf 826/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
827static inline int range_is_allowed(unsigned long pfn, unsigned long size)
828{
829 return 1;
830}
831#else
9e41bff2 832/* This check is needed to avoid cache aliasing when PAT is enabled */
833static inline int range_is_allowed(unsigned long pfn, unsigned long size)
834{
835 u64 from = ((u64)pfn) << PAGE_SHIFT;
836 u64 to = from + size;
837 u64 cursor = from;
838
cb32edf6 839 if (!pat_enabled())
840 return 1;
841
0124cecf 842 while (cursor < to) {
39380b80 843 if (!devmem_is_allowed(pfn))
0124cecf 844 return 0;
845 cursor += PAGE_SIZE;
846 pfn++;
847 }
848 return 1;
849}
d092633b 850#endif /* CONFIG_STRICT_DEVMEM */
0124cecf 851
f0970c13 852int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
853 unsigned long size, pgprot_t *vma_prot)
854{
e00c8cc9 855 enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;
f0970c13 856
857 if (!range_is_allowed(pfn, size))
858 return 0;
859
6b2f3d1f 860 if (file->f_flags & O_DSYNC)
e00c8cc9 861 pcm = _PAGE_CACHE_MODE_UC_MINUS;
f0970c13 862
e7f260a2 863 *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
e00c8cc9 864 cachemode2protval(pcm));
f0970c13 865 return 1;
866}
e7f260a2 867
7880f746 868/*
aee7f913 869 * Change the memory type for the physical address range in kernel identity
870 * mapping space if that range is a part of the identity map.
871 */
872int kernel_map_sync_memtype(u64 base, unsigned long size,
873 enum page_cache_mode pcm)
874{
875 unsigned long id_sz;
876
a25b9316 877 if (base > __pa(high_memory-1))
878 return 0;
879
60f583d5 880 /*
881 * Some areas in the middle of the kernel identity range
882 * are not mapped, for example the PCI space.
883 */
884 if (!page_is_ram(base >> PAGE_SHIFT))
885 return 0;
886
a25b9316 887 id_sz = (__pa(high_memory-1) <= base + size) ?
aee7f913 888 __pa(high_memory) - base : size;
7880f746 889
b14097bd 890 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
9e76561f 891 pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
7880f746 892 current->comm, current->pid,
e00c8cc9 893 cattr_name(pcm),
365811d6 894 base, (unsigned long long)(base + size-1));
895 return -EINVAL;
896 }
897 return 0;
898}
899
5899329b 900/*
901 * Internal interface to reserve a range of physical memory with prot.
902 * Reserves non-RAM regions only, and after a successful reserve_memtype()
903 * this function also keeps the identity mapping (if any) in sync with the new prot.
904 */
cdecff68 905static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
906 int strict_prot)
5899329b 907{
908 int is_ram = 0;
7880f746 909 int ret;
910 enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
911 enum page_cache_mode pcm = want_pcm;
5899329b 912
be03d9e8 913 is_ram = pat_pagerange_is_ram(paddr, paddr + size);
5899329b 914
be03d9e8 915 /*
916 * reserve_pfn_range() for RAM pages. We do not refcount to keep
917 * track of number of mappings of RAM pages. We can assert that
918 * the type requested matches the type of first page in the range.
be03d9e8 919 */
d886c73c 920 if (is_ram) {
cb32edf6 921 if (!pat_enabled())
922 return 0;
923
924 pcm = lookup_memtype(paddr);
925 if (want_pcm != pcm) {
9e76561f 926 pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
d886c73c 927 current->comm, current->pid,
e00c8cc9 928 cattr_name(want_pcm),
d886c73c 929 (unsigned long long)paddr,
365811d6 930 (unsigned long long)(paddr + size - 1),
e00c8cc9 931 cattr_name(pcm));
d886c73c 932 *vma_prot = __pgprot((pgprot_val(*vma_prot) &
933 (~_PAGE_CACHE_MASK)) |
934 cachemode2protval(pcm));
d886c73c 935 }
4bb9c5c0 936 return 0;
d886c73c 937 }
5899329b 938
e00c8cc9 939 ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
5899329b 940 if (ret)
941 return ret;
942
e00c8cc9 943 if (pcm != want_pcm) {
1adcaafe 944 if (strict_prot ||
e00c8cc9 945 !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
cdecff68 946 free_memtype(paddr, paddr + size);
947 pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
948 current->comm, current->pid,
949 cattr_name(want_pcm),
950 (unsigned long long)paddr,
951 (unsigned long long)(paddr + size - 1),
952 cattr_name(pcm));
cdecff68 953 return -EINVAL;
954 }
955 /*
956 * We allow returning a different type than the one requested in
957 * the non-strict case.
958 */
959 *vma_prot = __pgprot((pgprot_val(*vma_prot) &
960 (~_PAGE_CACHE_MASK)) |
e00c8cc9 961 cachemode2protval(pcm));
5899329b 962 }
963
e00c8cc9 964 if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
5899329b 965 free_memtype(paddr, paddr + size);
5899329b 966 return -EINVAL;
967 }
968 return 0;
969}
970
971/*
972 * Internal interface to free a range of physical memory.
973 * Frees non RAM regions only.
974 */
975static void free_pfn_range(u64 paddr, unsigned long size)
976{
977 int is_ram;
978
be03d9e8 979 is_ram = pat_pagerange_is_ram(paddr, paddr + size);
5899329b 980 if (is_ram == 0)
981 free_memtype(paddr, paddr + size);
982}
983
984/*
5180da41 985 * track_pfn_copy is called when a vma covering the pfnmap gets
5899329b 986 * copied through copy_page_range().
987 *
988 * If the vma has a linear pfn mapping for the entire range, we get the prot
989 * from the pte and reserve the entire vma range with a single reserve_pfn_range() call.
5899329b 990 */
5180da41 991int track_pfn_copy(struct vm_area_struct *vma)
5899329b 992{
c1c15b65 993 resource_size_t paddr;
982d789a 994 unsigned long prot;
4b065046 995 unsigned long vma_size = vma->vm_end - vma->vm_start;
cdecff68 996 pgprot_t pgprot;
5899329b 997
b3b9c293 998 if (vma->vm_flags & VM_PAT) {
5899329b 999 /*
982d789a 1000 * reserve the whole chunk covered by vma. We need the
1001 * starting address and protection from pte.
5899329b 1002 */
4b065046 1003 if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
5899329b 1004 WARN_ON_ONCE(1);
982d789a 1005 return -EINVAL;
5899329b 1006 }
cdecff68 1007 pgprot = __pgprot(prot);
1008 return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
5899329b 1009 }
1010
5899329b 1011 return 0;
5899329b 1012}
1013
1014/*
1015 * prot is passed in as a parameter for the new mapping. If the vma has
1016 * a linear pfn mapping for the entire range, or no vma is provided,
1017 * reserve the entire pfn + size range with a single reserve_pfn_range()
1018 * call.
5899329b 1019 */
5180da41 1020int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
b3b9c293 1021 unsigned long pfn, unsigned long addr, unsigned long size)
5899329b 1022{
b1a86e15 1023 resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
2a374698 1024 enum page_cache_mode pcm;
5899329b 1025
b1a86e15 1026 /* reserve the whole chunk starting from paddr */
1027 if (!vma || (addr == vma->vm_start
1028 && size == (vma->vm_end - vma->vm_start))) {
1029 int ret;
1030
1031 ret = reserve_pfn_range(paddr, size, prot, 0);
9049771f 1032 if (ret == 0 && vma)
1033 vma->vm_flags |= VM_PAT;
1034 return ret;
1035 }
5899329b 1036
cb32edf6 1037 if (!pat_enabled())
1038 return 0;
1039
1040 /*
1041 * For anything smaller than the vma size we set prot based on the
1042 * lookup.
1043 */
2a374698 1044 pcm = lookup_memtype(paddr);
1045
1046 /* Check memtype for the remaining pages */
1047 while (size > PAGE_SIZE) {
1048 size -= PAGE_SIZE;
1049 paddr += PAGE_SIZE;
2a374698 1050 if (pcm != lookup_memtype(paddr))
1051 return -EINVAL;
1052 }
1053
dd7b6847 1054 *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
2a374698 1055 cachemode2protval(pcm));
1056
1057 return 0;
1058}
1059
308a047c 1060void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
5180da41 1061{
2a374698 1062 enum page_cache_mode pcm;
5180da41 1063
cb32edf6 1064 if (!pat_enabled())
308a047c 1065 return;
1066
1067 /* Set prot based on lookup */
f25748e3 1068 pcm = lookup_memtype(pfn_t_to_phys(pfn));
dd7b6847 1069 *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
2a374698 1070 cachemode2protval(pcm));
5899329b 1071}
1072
1073/*
5180da41 1074 * untrack_pfn is called while unmapping a pfnmap for a region.
5899329b 1075 * untrack can be called for a specific region indicated by pfn and size or
b1a86e15 1076 * can be for the entire vma (in which case pfn, size are zero).
5899329b 1077 */
1078void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
1079 unsigned long size)
5899329b 1080{
c1c15b65 1081 resource_size_t paddr;
b1a86e15 1082 unsigned long prot;
5899329b 1083
9049771f 1084 if (vma && !(vma->vm_flags & VM_PAT))
5899329b 1085 return;
1086
1087 /* free the chunk starting from pfn or the whole chunk */
1088 paddr = (resource_size_t)pfn << PAGE_SHIFT;
1089 if (!paddr && !size) {
1090 if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
1091 WARN_ON_ONCE(1);
1092 return;
1093 }
1094
1095 size = vma->vm_end - vma->vm_start;
5899329b 1096 }
b1a86e15 1097 free_pfn_range(paddr, size);
1098 if (vma)
1099 vma->vm_flags &= ~VM_PAT;
5899329b 1100}
1101
1102/*
1103 * untrack_pfn_moved is called while mremapping a pfnmap for a new region,
1104 * with the old vma after its pfnmap page table has been removed. The new
1105 * vma has a new pfnmap to the same pfn & cache type with VM_PAT set.
1106 */
1107void untrack_pfn_moved(struct vm_area_struct *vma)
1108{
1109 vma->vm_flags &= ~VM_PAT;
1110}
1111
2520bd31 1112pgprot_t pgprot_writecombine(pgprot_t prot)
1113{
7202fdb1 1114 return __pgprot(pgprot_val(prot) |
e00c8cc9 1115 cachemode2protval(_PAGE_CACHE_MODE_WC));
2520bd31 1116}
92b9af9e 1117EXPORT_SYMBOL_GPL(pgprot_writecombine);
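/*
 * Typical use (sketch): a driver's mmap handler marks its mapping
 * write-combining before remapping, e.g.:
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
 */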
2520bd31 1118
1119pgprot_t pgprot_writethrough(pgprot_t prot)
1120{
1121 return __pgprot(pgprot_val(prot) |
1122 cachemode2protval(_PAGE_CACHE_MODE_WT));
1123}
1124EXPORT_SYMBOL_GPL(pgprot_writethrough);
1125
012f09e7 1126#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
fec0962e 1127
1128/*
1129 * We are allocating a temporary printout-entry to be passed
1130 * between seq_start()/next() and seq_show():
1131 */
fec0962e 1132static struct memtype *memtype_get_idx(loff_t pos)
1133{
baf65855 1134 struct memtype *entry_print;
be5a0c12 1135 int ret;
fec0962e 1136
1137 entry_print = kzalloc(sizeof(struct memtype), GFP_KERNEL);
1138 if (!entry_print)
fec0962e 1139 return NULL;
1140
1141 spin_lock(&memtype_lock);
baf65855 1142 ret = memtype_copy_nth_element(entry_print, pos);
fec0962e 1143 spin_unlock(&memtype_lock);
ad2cde16 1144
1145 /* Free it on error: */
1146 if (ret) {
baf65855 1147 kfree(entry_print);
be5a0c12 1148 return NULL;
1149 }
aee7f913 1150
baf65855 1151 return entry_print;
fec0962e 1152}
1153
1154static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
1155{
1156 if (*pos == 0) {
1157 ++*pos;
3736708f 1158 seq_puts(seq, "PAT memtype list:\n");
fec0962e 1159 }
1160
1161 return memtype_get_idx(*pos);
1162}
1163
1164static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1165{
1166 ++*pos;
1167 return memtype_get_idx(*pos);
1168}
1169
1170static void memtype_seq_stop(struct seq_file *seq, void *v)
1171{
1172}
1173
1174static int memtype_seq_show(struct seq_file *seq, void *v)
1175{
baf65855 1176 struct memtype *entry_print = (struct memtype *)v;
fec0962e 1177
ef35b0fc 1178 seq_printf(seq, "PAT: [mem 0x%016Lx-0x%016Lx] %s\n",
1179 entry_print->start,
1180 entry_print->end,
1181 cattr_name(entry_print->type));
aee7f913 1182
baf65855 1183 kfree(entry_print);
ad2cde16 1184
fec0962e 1185 return 0;
1186}
1187
d535e431 1188static const struct seq_operations memtype_seq_ops = {
fec0962e 1189 .start = memtype_seq_start,
1190 .next = memtype_seq_next,
1191 .stop = memtype_seq_stop,
1192 .show = memtype_seq_show,
1193};
1194
1195static int memtype_seq_open(struct inode *inode, struct file *file)
1196{
1197 return seq_open(file, &memtype_seq_ops);
1198}
1199
1200static const struct file_operations memtype_fops = {
1201 .open = memtype_seq_open,
1202 .read = seq_read,
1203 .llseek = seq_lseek,
1204 .release = seq_release,
1205};
1206
1207static int __init pat_memtype_list_init(void)
1208{
cb32edf6 1209 if (pat_enabled()) {
1210 debugfs_create_file("pat_memtype_list", S_IRUSR,
1211 arch_debugfs_dir, NULL, &memtype_fops);
1212 }
fec0962e 1213 return 0;
1214}
fec0962e 1215late_initcall(pat_memtype_list_init);
1216
012f09e7 1217#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */