Commit | Line | Data |
---|---|---|
2e5d9c85 | 1 | /* |
2 | * Handle caching attributes in page tables (PAT) | |
3 | * | |
4 | * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | |
5 | * Suresh B Siddha <suresh.b.siddha@intel.com> | |
6 | * | |
7 | * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen. | |
8 | */ | |
9 | ||
ad2cde16 IM | 10 | #include <linux/seq_file.h> |
11 | #include <linux/bootmem.h> | |
12 | #include <linux/debugfs.h> | |
2e5d9c85 | 13 | #include <linux/kernel.h> |
92b9af9e | 14 | #include <linux/module.h> |
5a0e3ad6 | 15 | #include <linux/slab.h> |
ad2cde16 | 16 | #include <linux/mm.h> |
2e5d9c85 | 17 | #include <linux/fs.h> |
335ef896 | 18 | #include <linux/rbtree.h> |
2e5d9c85 | 19 | |
ad2cde16 | 20 | #include <asm/cacheflush.h> |
2e5d9c85 | 21 | #include <asm/processor.h> |
ad2cde16 | 22 | #include <asm/tlbflush.h> |
fd12a0d6 | 23 | #include <asm/x86_init.h> |
2e5d9c85 | 24 | #include <asm/pgtable.h> |
2e5d9c85 | 25 | #include <asm/fcntl.h> |
ad2cde16 | 26 | #include <asm/e820.h> |
2e5d9c85 | 27 | #include <asm/mtrr.h> |
ad2cde16 IM | 28 | #include <asm/page.h> |
29 | #include <asm/msr.h> | |
30 | #include <asm/pat.h> | |
e7f260a2 | 31 | #include <asm/io.h> |
2e5d9c85 | 32 | |
be5a0c12 | 33 | #include "pat_internal.h" |
bd809af1 | 34 | #include "mm_internal.h" |
be5a0c12 | 35 | |
9e76561f LR | 36 | #undef pr_fmt |
37 | #define pr_fmt(fmt) "" fmt | |
38 | ||
9dac6290 BP | 39 | static bool boot_cpu_done; |
40 | ||
cb32edf6 | 41 | static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT); |
2e5d9c85 | 42 | |
1ee4bd92 | 43 | static inline void pat_disable(const char *reason) |
2e5d9c85 | 44 | { |
cb32edf6 | 45 | __pat_enabled = 0; |
9e76561f | 46 | pr_info("x86/PAT: %s\n", reason); |
2e5d9c85 | 47 | } |
2e5d9c85 | 48 | |
be524fb9 | 49 | static int __init nopat(char *str) |
2e5d9c85 | 50 | { |
8d4a4300 | 51 | pat_disable("PAT support disabled."); |
2e5d9c85 | 52 | return 0; |
53 | } | |
8d4a4300 | 54 | early_param("nopat", nopat); |
cb32edf6 LR | 55 | |
56 | bool pat_enabled(void) | |
75a04811 | 57 | { |
cb32edf6 | 58 | return !!__pat_enabled; |
75a04811 | 59 | } |
fbe7193a | 60 | EXPORT_SYMBOL_GPL(pat_enabled); |
77b52b4c | 61 | |
be5a0c12 | 62 | int pat_debug_enable; |
ad2cde16 | 63 | |
77b52b4c VP | 64 | static int __init pat_debug_setup(char *str) |
65 | { | |
be5a0c12 | 66 | pat_debug_enable = 1; |
77b52b4c VP | 67 | return 0; |
68 | } | |
69 | __setup("debugpat", pat_debug_setup); | |
70 | ||
0dbcae88 TG | 71 | #ifdef CONFIG_X86_PAT |
72 | /* | |
73 | * X86 PAT uses the page flags WC and Uncached together to keep track of | |
74 | * the memory type of pages that have a backing struct page. X86 PAT supports | |
75 | * three different memory types, _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC and | |
76 | * _PAGE_CACHE_MODE_UC_MINUS, plus a fourth state where the page's memory type | |
77 | * has not been changed from its default (a value of -1 denotes this). | |
78 | * Note we do not support _PAGE_CACHE_MODE_UC here. | |
79 | */ | |
80 | ||
81 | #define _PGMT_DEFAULT 0 | |
82 | #define _PGMT_WC (1UL << PG_arch_1) | |
83 | #define _PGMT_UC_MINUS (1UL << PG_uncached) | |
84 | #define _PGMT_WB (1UL << PG_uncached | 1UL << PG_arch_1) | |
85 | #define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1) | |
86 | #define _PGMT_CLEAR_MASK (~_PGMT_MASK) | |
87 | ||
88 | static inline enum page_cache_mode get_page_memtype(struct page *pg) | |
89 | { | |
90 | unsigned long pg_flags = pg->flags & _PGMT_MASK; | |
91 | ||
92 | if (pg_flags == _PGMT_DEFAULT) | |
93 | return -1; | |
94 | else if (pg_flags == _PGMT_WC) | |
95 | return _PAGE_CACHE_MODE_WC; | |
96 | else if (pg_flags == _PGMT_UC_MINUS) | |
97 | return _PAGE_CACHE_MODE_UC_MINUS; | |
98 | else | |
99 | return _PAGE_CACHE_MODE_WB; | |
100 | } | |
101 | ||
102 | static inline void set_page_memtype(struct page *pg, | |
103 | enum page_cache_mode memtype) | |
104 | { | |
105 | unsigned long memtype_flags; | |
106 | unsigned long old_flags; | |
107 | unsigned long new_flags; | |
108 | ||
109 | switch (memtype) { | |
110 | case _PAGE_CACHE_MODE_WC: | |
111 | memtype_flags = _PGMT_WC; | |
112 | break; | |
113 | case _PAGE_CACHE_MODE_UC_MINUS: | |
114 | memtype_flags = _PGMT_UC_MINUS; | |
115 | break; | |
116 | case _PAGE_CACHE_MODE_WB: | |
117 | memtype_flags = _PGMT_WB; | |
118 | break; | |
119 | default: | |
120 | memtype_flags = _PGMT_DEFAULT; | |
121 | break; | |
122 | } | |
123 | ||
124 | do { | |
125 | old_flags = pg->flags; | |
126 | new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags; | |
127 | } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags); | |
128 | } | |
129 | #else | |
130 | static inline enum page_cache_mode get_page_memtype(struct page *pg) | |
131 | { | |
132 | return -1; | |
133 | } | |
134 | static inline void set_page_memtype(struct page *pg, | |
135 | enum page_cache_mode memtype) | |
136 | { | |
137 | } | |
138 | #endif | |
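The two tracking bits give exactly four encodable states, which is why an additional type such as strong UC cannot be represented here. Below is a minimal userspace sketch of the same encode/decode scheme; the bit positions and names are illustrative assumptions, and a plain assignment replaces the kernel's cmpxchg() loop. The kernel needs that retry loop because unrelated page flags share the same word and may change concurrently.

```c
#include <stdio.h>

/* Hypothetical bit positions standing in for PG_uncached and PG_arch_1. */
#define PG_UNCACHED_BIT 0
#define PG_ARCH_1_BIT   1

#define PGMT_DEFAULT  0UL
#define PGMT_WC       (1UL << PG_ARCH_1_BIT)
#define PGMT_UC_MINUS (1UL << PG_UNCACHED_BIT)
#define PGMT_WB       ((1UL << PG_UNCACHED_BIT) | (1UL << PG_ARCH_1_BIT))
#define PGMT_MASK     PGMT_WB

int main(void)
{
	unsigned long flags = 0;	/* fresh page: default (untracked) state */

	/* set_page_memtype() analogue: clear both bits, OR in the new code */
	flags = (flags & ~PGMT_MASK) | PGMT_WC;

	/* get_page_memtype() analogue: mask out all but the two bits */
	printf("tracked as WC: %d\n", (flags & PGMT_MASK) == PGMT_WC);
	return 0;
}
```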
139 | ||
2e5d9c85 | 140 | enum { |
141 | PAT_UC = 0, /* uncached */ | |
142 | PAT_WC = 1, /* Write combining */ | |
143 | PAT_WT = 4, /* Write Through */ | |
144 | PAT_WP = 5, /* Write Protected */ | |
145 | PAT_WB = 6, /* Write Back (default) */ | |
146 | PAT_UC_MINUS = 7, /* UC, but can be overridden by MTRR */ | |
147 | }; | |
148 | ||
bd809af1 JG | 149 | #define CM(c) (_PAGE_CACHE_MODE_ ## c) |
150 | ||
151 | static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg) | |
152 | { | |
153 | enum page_cache_mode cache; | |
154 | char *cache_mode; | |
155 | ||
156 | switch (pat_val) { | |
157 | case PAT_UC: cache = CM(UC); cache_mode = "UC "; break; | |
158 | case PAT_WC: cache = CM(WC); cache_mode = "WC "; break; | |
159 | case PAT_WT: cache = CM(WT); cache_mode = "WT "; break; | |
160 | case PAT_WP: cache = CM(WP); cache_mode = "WP "; break; | |
161 | case PAT_WB: cache = CM(WB); cache_mode = "WB "; break; | |
162 | case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break; | |
163 | default: cache = CM(WB); cache_mode = "WB "; break; | |
164 | } | |
165 | ||
166 | memcpy(msg, cache_mode, 4); | |
167 | ||
168 | return cache; | |
169 | } | |
170 | ||
171 | #undef CM | |
172 | ||
173 | /* | |
174 | * Update the cache mode to pgprot translation tables according to PAT | |
175 | * configuration. | |
176 | * Using lower indices is preferred, so we start with the highest index. | |
177 | */ | |
9cd25aac | 178 | void pat_init_cache_modes(u64 pat) |
bd809af1 | 179 | { |
bd809af1 JG | 180 | enum page_cache_mode cache; |
181 | char pat_msg[33]; | |
9cd25aac | 182 | int i; |
bd809af1 | 183 | |
bd809af1 JG | 184 | pat_msg[32] = 0; |
185 | for (i = 7; i >= 0; i--) { | |
186 | cache = pat_get_cache_mode((pat >> (i * 8)) & 7, | |
187 | pat_msg + 4 * i); | |
188 | update_cache_mode_entry(i, cache); | |
189 | } | |
9e76561f | 190 | pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg); |
bd809af1 JG | 191 | } |
192 | ||
cd7a4e93 | 193 | #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8)) |
2e5d9c85 | 194 | |
9dac6290 | 195 | static void pat_bsp_init(u64 pat) |
2e5d9c85 | 196 | { |
9cd25aac BP | 197 | u64 tmp_pat; |
198 | ||
9dac6290 BP | 199 | if (!cpu_has_pat) { |
200 | pat_disable("PAT not supported by CPU."); | |
201 | return; | |
202 | } | |
2e5d9c85 | 203 | |
9cd25aac BP | 204 | if (!pat_enabled()) |
205 | goto done; | |
206 | ||
207 | rdmsrl(MSR_IA32_CR_PAT, tmp_pat); | |
208 | if (!tmp_pat) { | |
9dac6290 | 209 | pat_disable("PAT MSR is 0, disabled."); |
2e5d9c85 | 210 | return; |
9dac6290 BP | 211 | } |
212 | ||
213 | wrmsrl(MSR_IA32_CR_PAT, pat); | |
2e5d9c85 | 214 | |
9cd25aac BP | 215 | done: |
216 | pat_init_cache_modes(pat); | |
9dac6290 BP | 217 | } |
218 | ||
219 | static void pat_ap_init(u64 pat) | |
220 | { | |
9cd25aac BP | 221 | if (!pat_enabled()) |
222 | return; | |
223 | ||
75a04811 | 224 | if (!cpu_has_pat) { |
9dac6290 BP | 225 | /* |
226 | * If this happens we are on a secondary CPU, but switched to | |
227 | * PAT on the boot CPU. We have no way to undo PAT. | |
228 | */ | |
229 | panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n"); | |
8d4a4300 | 230 | } |
2e5d9c85 | 231 | |
9dac6290 BP | 232 | wrmsrl(MSR_IA32_CR_PAT, pat); |
233 | } | |
234 | ||
235 | void pat_init(void) | |
236 | { | |
237 | u64 pat; | |
238 | ||
9cd25aac BP | 239 | if (!pat_enabled()) { |
240 | /* | |
241 | * No PAT. Emulate the PAT table that corresponds to the two | |
242 | * cache bits, PWT (Write Through) and PCD (Cache Disable). This | |
243 | * setup is the same as the BIOS default setup when the system | |
244 | * has PAT but the "nopat" boot option has been specified. This | |
245 | * emulated PAT table is used when MSR_IA32_CR_PAT returns 0. | |
246 | * | |
247 | * PTE encoding used: | |
248 | * | |
249 | * PCD | |
250 | * |PWT PAT | |
251 | * || slot | |
252 | * 00 0 WB : _PAGE_CACHE_MODE_WB | |
253 | * 01 1 WT : _PAGE_CACHE_MODE_WT | |
254 | * 10 2 UC-: _PAGE_CACHE_MODE_UC_MINUS | |
255 | * 11 3 UC : _PAGE_CACHE_MODE_UC | |
256 | * | |
257 | * NOTE: When WC or WP is used, it is redirected to UC- per | |
258 | * the default setup in __cachemode2pte_tbl[]. | |
259 | */ | |
260 | pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) | | |
261 | PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC); | |
262 | } else { | |
263 | /* | |
264 | * PTE encoding used in Linux: | |
265 | * PAT | |
266 | * |PCD | |
267 | * ||PWT | |
268 | * ||| | |
269 | * 000 WB _PAGE_CACHE_WB | |
270 | * 001 WC _PAGE_CACHE_WC | |
271 | * 010 UC- _PAGE_CACHE_UC_MINUS | |
272 | * 011 UC _PAGE_CACHE_UC | |
273 | * PAT bit unused | |
274 | */ | |
275 | pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) | | |
276 | PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC); | |
277 | } | |
2e5d9c85 | 278 | |
9dac6290 BP | 279 | if (!boot_cpu_done) { |
280 | pat_bsp_init(pat); | |
281 | boot_cpu_done = true; | |
282 | } else { | |
283 | pat_ap_init(pat); | |
9d34cfdf | 284 | } |
2e5d9c85 | 285 | } |
286 | ||
287 | #undef PAT | |
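For reference, the two layouts composed above collapse to fixed MSR values. The following standalone sketch reuses the PAT() composition idiom from this file (with the enum values inlined and uint64_t in place of u64); it prints 0x0007010600070106 for the Linux layout and 0x0007040600070406 for the no-PAT fallback:

```c
#include <stdio.h>
#include <stdint.h>

enum { PAT_UC = 0, PAT_WC = 1, PAT_WT = 4, PAT_WP = 5,
       PAT_WB = 6, PAT_UC_MINUS = 7 };

#define PAT(x, y) ((uint64_t)PAT_ ## y << ((x) * 8))

int main(void)
{
	uint64_t linux_pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
			     PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
	uint64_t nopat_pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
			     PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);

	printf("PAT MSR, Linux layout:    %#018llx\n",
	       (unsigned long long)linux_pat);
	printf("PAT MSR, no-PAT fallback: %#018llx\n",
	       (unsigned long long)nopat_pat);
	return 0;
}
```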
288 | ||
9e41a49a | 289 | static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */ |
335ef896 | 290 | |
2e5d9c85 | 291 | /* |
292 | * Does the intersection of the PAT memory type and the MTRR memory type | |
293 | * and returns the resulting memory type as PAT understands it. | |
294 | * (The PAT and MTRR encodings do not use the same values.) | |
295 | * The intersection is based on the "Effective Memory Type" tables in the | |
296 | * IA-32 SDM, Vol. 3a. | |
297 | */ | |
e00c8cc9 JG | 298 | static unsigned long pat_x_mtrr_type(u64 start, u64 end, |
299 | enum page_cache_mode req_type) | |
2e5d9c85 | 300 | { |
c26421d0 VP | 301 | /* |
302 | * Look for MTRR hint to get the effective type in case where PAT | |
303 | * request is for WB. | |
304 | */ | |
e00c8cc9 | 305 | if (req_type == _PAGE_CACHE_MODE_WB) { |
b73522e0 | 306 | u8 mtrr_type, uniform; |
dd0c7c49 | 307 | |
b73522e0 | 308 | mtrr_type = mtrr_type_lookup(start, end, &uniform); |
b6ff32d9 | 309 | if (mtrr_type != MTRR_TYPE_WRBACK) |
e00c8cc9 | 310 | return _PAGE_CACHE_MODE_UC_MINUS; |
b6ff32d9 | 311 | |
e00c8cc9 | 312 | return _PAGE_CACHE_MODE_WB; |
dd0c7c49 AH | 313 | } |
314 | ||
315 | return req_type; | |
2e5d9c85 | 316 | } |
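Stripped of the kernel types, the rule is small: only WB requests consult the MTRRs at all, and any non-WB hint demotes the request to UC-. A hedged sketch with the MTRR lookup replaced by a boolean stub (the enum and function name are illustrative assumptions):

```c
/* Sketch of pat_x_mtrr_type()'s decision rule; mtrr_is_wb stands in for
 * the real mtrr_type_lookup(start, end, &uniform) == MTRR_TYPE_WRBACK test. */
enum mode { MODE_WB, MODE_WC, MODE_UC_MINUS, MODE_UC };

static enum mode effective_type(enum mode req, int mtrr_is_wb)
{
	if (req == MODE_WB)
		return mtrr_is_wb ? MODE_WB : MODE_UC_MINUS;
	return req;	/* non-WB requests are taken at face value */
}
```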
317 | ||
fa83523f JD | 318 | struct pagerange_state { |
319 | unsigned long cur_pfn; | |
320 | int ram; | |
321 | int not_ram; | |
322 | }; | |
323 | ||
324 | static int | |
325 | pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg) | |
326 | { | |
327 | struct pagerange_state *state = arg; | |
328 | ||
329 | state->not_ram |= initial_pfn > state->cur_pfn; | |
330 | state->ram |= total_nr_pages > 0; | |
331 | state->cur_pfn = initial_pfn + total_nr_pages; | |
332 | ||
333 | return state->ram && state->not_ram; | |
334 | } | |
335 | ||
3709c857 | 336 | static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end) |
be03d9e8 | 337 | { |
fa83523f JD | 338 | int ret = 0; |
339 | unsigned long start_pfn = start >> PAGE_SHIFT; | |
340 | unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; | |
341 | struct pagerange_state state = {start_pfn, 0, 0}; | |
342 | ||
343 | /* | |
344 | * For legacy reasons, physical address ranges in the legacy ISA | |
345 | * region are tracked as non-RAM. This allows users of /dev/mem | |
346 | * to map portions of the legacy ISA region, even when some of | |
347 | * those portions are listed (or not listed at all) with different | |
348 | * e820 types (RAM/reserved/...). | |
349 | */ | |
350 | if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT) | |
351 | start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT; | |
352 | ||
353 | if (start_pfn < end_pfn) { | |
354 | ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, | |
355 | &state, pagerange_is_ram_callback); | |
be03d9e8 SS | 356 | } |
357 | ||
fa83523f | 358 | return (ret > 0) ? -1 : (state.ram ? 1 : 0); |
be03d9e8 SS | 359 | } |
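The callback above is a small state machine: not_ram latches when a reported RAM chunk starts past the cursor (meaning a hole preceded it), ram latches once any RAM is seen, and the walk stops early as soon as both are set. A userspace sketch with the walk reduced to one hand-fed callback step (the pfn numbers are hypothetical) reproduces the mixed-range verdict:

```c
#include <stdio.h>

struct pagerange_state { unsigned long cur_pfn; int ram; int not_ram; };

/* One callback step, as in pagerange_is_ram_callback(): the walker
 * reports the RAM chunk [pfn, pfn + n) inside the queried range. */
static int step(struct pagerange_state *s, unsigned long pfn, unsigned long n)
{
	s->not_ram |= pfn > s->cur_pfn;	/* a hole preceded this chunk */
	s->ram     |= n > 0;		/* some RAM was seen */
	s->cur_pfn  = pfn + n;
	return s->ram && s->not_ram;	/* nonzero stops the walk early */
}

int main(void)
{
	struct pagerange_state s = { 100, 0, 0 };	/* query starts at pfn 100 */

	/* Hypothetical walk result: one RAM chunk [120, 150). */
	int stopped = step(&s, 120, 30);

	/* Same verdict expression as pat_pagerange_is_ram() uses: */
	printf("%d\n", stopped ? -1 : (s.ram ? 1 : 0));	/* prints -1: mixed */
	return 0;
}
```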
360 | ||
9542ada8 | 361 | /* |
f5841740 VP | 362 | * For RAM pages, we use page flags to mark the pages with the appropriate type. |
363 | * Here we make two passes: | |
364 | * - Find the memtype of all the pages in the range, and look for any conflicts. | |
365 | * - If there are no conflicts, set the new memtype for the pages in the range. | |
9542ada8 | 366 | */ |
e00c8cc9 JG | 367 | static int reserve_ram_pages_type(u64 start, u64 end, |
368 | enum page_cache_mode req_type, | |
369 | enum page_cache_mode *new_type) | |
9542ada8 SS | 370 | { |
371 | struct page *page; | |
f5841740 VP | 372 | u64 pfn; |
373 | ||
e00c8cc9 | 374 | if (req_type == _PAGE_CACHE_MODE_UC) { |
f5841740 VP | 375 | /* We do not support strong UC */ |
376 | WARN_ON_ONCE(1); | |
e00c8cc9 | 377 | req_type = _PAGE_CACHE_MODE_UC_MINUS; |
f5841740 | 378 | } |
9542ada8 SS | 379 | |
380 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { | |
e00c8cc9 | 381 | enum page_cache_mode type; |
9542ada8 | 382 | |
f5841740 VP | 383 | page = pfn_to_page(pfn); |
384 | type = get_page_memtype(page); | |
385 | if (type != -1) { | |
9e76561f | 386 | pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n", |
365811d6 | 387 | start, end - 1, type, req_type); |
f5841740 VP | 388 | if (new_type) |
389 | *new_type = type; | |
390 | ||
391 | return -EBUSY; | |
392 | } | |
9542ada8 | 393 | } |
9542ada8 | 394 | |
f5841740 VP | 395 | if (new_type) |
396 | *new_type = req_type; | |
397 | ||
398 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { | |
9542ada8 | 399 | page = pfn_to_page(pfn); |
f5841740 | 400 | set_page_memtype(page, req_type); |
9542ada8 | 401 | } |
f5841740 | 402 | return 0; |
9542ada8 SS | 403 | } |
404 | ||
405 | static int free_ram_pages_type(u64 start, u64 end) | |
406 | { | |
407 | struct page *page; | |
f5841740 | 408 | u64 pfn; |
9542ada8 SS | 409 | |
410 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { | |
411 | page = pfn_to_page(pfn); | |
f5841740 | 412 | set_page_memtype(page, -1); |
9542ada8 SS | 413 | } |
414 | return 0; | |
9542ada8 SS | 415 | } |
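Both helpers rely on a check-then-commit pattern: scan every page for a conflicting type first, and only write the new type once the whole range is known to be clean. A userspace sketch with an int array standing in for the per-page flag bits (the array size, names and types are assumptions):

```c
#include <stdio.h>

#define NPAGES     8
#define MT_DEFAULT (-1)	/* analogue of get_page_memtype()'s -1 */

static int page_memtype[NPAGES];

/* Two-pass reserve, mirroring reserve_ram_pages_type(): fail on any
 * already-typed page, otherwise commit the new type to every page. */
static int reserve_pages(int lo, int hi, int req_type)
{
	int i;

	for (i = lo; i < hi; i++)	/* pass 1: conflict scan */
		if (page_memtype[i] != MT_DEFAULT)
			return -1;	/* -EBUSY in the kernel */
	for (i = lo; i < hi; i++)	/* pass 2: commit */
		page_memtype[i] = req_type;
	return 0;
}

int main(void)
{
	int i;

	for (i = 0; i < NPAGES; i++)
		page_memtype[i] = MT_DEFAULT;

	printf("%d\n", reserve_pages(0, 4, 1));	/* 0: range was free */
	printf("%d\n", reserve_pages(2, 6, 2));	/* -1: pages 2-3 busy */
	return 0;
}
```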
416 | ||
e7f260a2 | 417 | /* |
418 | * req_type typically has one of the following: | |
e00c8cc9 JG | 419 | * - _PAGE_CACHE_MODE_WB |
420 | * - _PAGE_CACHE_MODE_WC | |
421 | * - _PAGE_CACHE_MODE_UC_MINUS | |
422 | * - _PAGE_CACHE_MODE_UC | |
e7f260a2 | 423 | * |
ac97991e AH | 424 | * If new_type is NULL, the function will return an error if it cannot |
425 | * reserve the region with req_type. If new_type is non-NULL, the function | |
426 | * will return the available type in new_type in case of no error. In case | |
e7f260a2 | 427 | * of any error it will return a negative return value. |
428 | */ | |
e00c8cc9 JG | 429 | int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type, |
430 | enum page_cache_mode *new_type) | |
2e5d9c85 | 431 | { |
be5a0c12 | 432 | struct memtype *new; |
e00c8cc9 | 433 | enum page_cache_mode actual_type; |
9542ada8 | 434 | int is_range_ram; |
ad2cde16 | 435 | int err = 0; |
2e5d9c85 | 436 | |
ad2cde16 | 437 | BUG_ON(start >= end); /* end is exclusive */ |
69e26be9 | 438 | |
cb32edf6 | 439 | if (!pat_enabled()) { |
e7f260a2 | 440 | /* This is identical to page table setting without PAT */ |
ac97991e | 441 | if (new_type) { |
e00c8cc9 JG | 442 | if (req_type == _PAGE_CACHE_MODE_WC) |
443 | *new_type = _PAGE_CACHE_MODE_UC_MINUS; | |
ac97991e | 444 | else |
e00c8cc9 | 445 | *new_type = req_type; |
e7f260a2 | 446 | } |
2e5d9c85 | 447 | return 0; |
448 | } | |
449 | ||
450 | /* Low ISA region is always mapped WB in page table. No need to track */ | |
8a271389 | 451 | if (x86_platform.is_untracked_pat_range(start, end)) { |
ac97991e | 452 | if (new_type) |
e00c8cc9 | 453 | *new_type = _PAGE_CACHE_MODE_WB; |
2e5d9c85 | 454 | return 0; |
455 | } | |
456 | ||
b6ff32d9 SS | 457 | /* |
458 | * Call mtrr_lookup to get the type hint. This is an | |
459 | * optimization for /dev/mem mmap'ers into WB memory (BIOS | |
460 | * tools and ACPI tools). Use WB request for WB memory and use | |
461 | * UC_MINUS otherwise. | |
462 | */ | |
e00c8cc9 | 463 | actual_type = pat_x_mtrr_type(start, end, req_type); |
2e5d9c85 | 464 | |
95971342 SS | 465 | if (new_type) |
466 | *new_type = actual_type; | |
467 | ||
be03d9e8 | 468 | is_range_ram = pat_pagerange_is_ram(start, end); |
f5841740 VP | 469 | if (is_range_ram == 1) { |
470 | ||
f5841740 | 471 | err = reserve_ram_pages_type(start, end, req_type, new_type); |
f5841740 VP | 472 | |
473 | return err; | |
474 | } else if (is_range_ram < 0) { | |
9542ada8 | 475 | return -EINVAL; |
f5841740 | 476 | } |
9542ada8 | 477 | |
6a4f3b52 | 478 | new = kzalloc(sizeof(struct memtype), GFP_KERNEL); |
ac97991e | 479 | if (!new) |
2e5d9c85 | 480 | return -ENOMEM; |
481 | ||
ad2cde16 IM | 482 | new->start = start; |
483 | new->end = end; | |
484 | new->type = actual_type; | |
2e5d9c85 | 485 | |
2e5d9c85 | 486 | spin_lock(&memtype_lock); |
487 | ||
9e41a49a | 488 | err = rbt_memtype_check_insert(new, new_type); |
2e5d9c85 | 489 | if (err) { |
9e76561f LR | 490 | pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n", |
491 | start, end - 1, | |
492 | cattr_name(new->type), cattr_name(req_type)); | |
ac97991e | 493 | kfree(new); |
2e5d9c85 | 494 | spin_unlock(&memtype_lock); |
ad2cde16 | 495 | |
2e5d9c85 | 496 | return err; |
497 | } | |
498 | ||
2e5d9c85 | 499 | spin_unlock(&memtype_lock); |
3e9c83b3 | 500 | |
365811d6 BH | 501 | dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n", |
502 | start, end - 1, cattr_name(new->type), cattr_name(req_type), |
3e9c83b3 AH | 503 | new_type ? cattr_name(*new_type) : "-"); |
504 | ||
2e5d9c85 | 505 | return err; |
506 | } | |
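A hedged usage sketch of the reserve/free pairing follows; the function name and address range are hypothetical, and in-tree callers normally reach this code through ioremap_*() or io_reserve_memtype() rather than calling it directly:

```c
/* Sketch only: claim a hypothetical MMIO window as WC, accept whatever
 * compatible type PAT hands back, and always release the reservation. */
static int example_claim_wc_window(void)
{
	u64 start = 0xfd000000ULL;	/* hypothetical MMIO base */
	u64 end = start + 0x10000;
	enum page_cache_mode got;
	int ret;

	ret = reserve_memtype(start, end, _PAGE_CACHE_MODE_WC, &got);
	if (ret)
		return ret;	/* e.g. -EBUSY on a conflicting reservation */

	/* ... map and use the region with 'got' (WC, or a demoted type) ... */

	free_memtype(start, end);
	return 0;
}
```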
507 | ||
508 | int free_memtype(u64 start, u64 end) | |
509 | { | |
2e5d9c85 | 510 | int err = -EINVAL; |
9542ada8 | 511 | int is_range_ram; |
20413f27 | 512 | struct memtype *entry; |
2e5d9c85 | 513 | |
cb32edf6 | 514 | if (!pat_enabled()) |
2e5d9c85 | 515 | return 0; |
2e5d9c85 | 516 | |
517 | /* Low ISA region is always mapped WB. No need to track */ | |
8a271389 | 518 | if (x86_platform.is_untracked_pat_range(start, end)) |
2e5d9c85 | 519 | return 0; |
2e5d9c85 | 520 | |
be03d9e8 | 521 | is_range_ram = pat_pagerange_is_ram(start, end); |
f5841740 VP | 522 | if (is_range_ram == 1) { |
523 | ||
f5841740 | 524 | err = free_ram_pages_type(start, end); |
f5841740 VP | 525 | |
526 | return err; | |
527 | } else if (is_range_ram < 0) { | |
9542ada8 | 528 | return -EINVAL; |
f5841740 | 529 | } |
9542ada8 | 530 | |
2e5d9c85 | 531 | spin_lock(&memtype_lock); |
20413f27 | 532 | entry = rbt_memtype_erase(start, end); |
2e5d9c85 | 533 | spin_unlock(&memtype_lock); |
534 | ||
20413f27 | 535 | if (!entry) { |
9e76561f LR | 536 | pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n", |
537 | current->comm, current->pid, start, end - 1); | |
20413f27 | 538 | return -EINVAL; |
2e5d9c85 | 539 | } |
6997ab49 | 540 | |
20413f27 XF | 541 | kfree(entry); |
542 | ||
365811d6 | 543 | dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1); |
ad2cde16 | 544 | |
20413f27 | 545 | return 0; |
2e5d9c85 | 546 | } |
547 | ||
f0970c13 | 548 | |
637b86e7 VP | 549 | /** |
550 | * lookup_memtype - Looks up the memory type for a physical address |
551 | * @paddr: physical address of which memory type needs to be looked up | |
552 | * | |
553 | * Only to be called when PAT is enabled | |
554 | * | |
2a374698 JG | 555 | * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS |
556 | * or _PAGE_CACHE_MODE_UC | |
637b86e7 | 557 | */ |
2a374698 | 558 | static enum page_cache_mode lookup_memtype(u64 paddr) |
637b86e7 | 559 | { |
2a374698 | 560 | enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB; |
637b86e7 VP | 561 | struct memtype *entry; |
562 | ||
8a271389 | 563 | if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE)) |
637b86e7 VP | 564 | return rettype; |
565 | ||
566 | if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) { | |
567 | struct page *page; | |
637b86e7 VP | 568 | page = pfn_to_page(paddr >> PAGE_SHIFT); |
569 | rettype = get_page_memtype(page); | |
637b86e7 VP | 570 | /* |
571 | * -1 from get_page_memtype() implies RAM page is in its | |
572 | * default state and not reserved, and hence of type WB | |
573 | */ | |
574 | if (rettype == -1) | |
2a374698 | 575 | rettype = _PAGE_CACHE_MODE_WB; |
637b86e7 VP | 576 | |
577 | return rettype; | |
578 | } | |
579 | ||
580 | spin_lock(&memtype_lock); | |
581 | ||
9e41a49a | 582 | entry = rbt_memtype_lookup(paddr); |
637b86e7 VP | 583 | if (entry != NULL) |
584 | rettype = entry->type; | |
585 | else | |
2a374698 | 586 | rettype = _PAGE_CACHE_MODE_UC_MINUS; |
637b86e7 VP | 587 | |
588 | spin_unlock(&memtype_lock); | |
589 | return rettype; | |
590 | } | |
591 | ||
9fd126bc VP | 592 | /** |
593 | * io_reserve_memtype - Request a memory type mapping for a region of memory | |
594 | * @start: start (physical address) of the region | |
595 | * @end: end (physical address) of the region | |
596 | * @type: A pointer to memtype, with the requested type. On success, the requested |
597 | * type or any other compatible type that was available for the region is returned |
598 | * | |
599 | * On success, returns 0 | |
600 | * On failure, returns non-zero | |
601 | */ | |
602 | int io_reserve_memtype(resource_size_t start, resource_size_t end, | |
49a3b3cb | 603 | enum page_cache_mode *type) |
9fd126bc | 604 | { |
b855192c | 605 | resource_size_t size = end - start; |
49a3b3cb JG | 606 | enum page_cache_mode req_type = *type; |
607 | enum page_cache_mode new_type; | |
9fd126bc VP | 608 | int ret; |
609 | ||
b855192c | 610 | WARN_ON_ONCE(iomem_map_sanity_check(start, size)); |
9fd126bc VP | 611 | |
612 | ret = reserve_memtype(start, end, req_type, &new_type); | |
613 | if (ret) | |
614 | goto out_err; | |
615 | ||
b855192c | 616 | if (!is_new_memtype_allowed(start, size, req_type, new_type)) |
9fd126bc VP | 617 | goto out_free; |
618 | ||
b855192c | 619 | if (kernel_map_sync_memtype(start, size, new_type) < 0) |
9fd126bc VP | 620 | goto out_free; |
621 | ||
622 | *type = new_type; | |
623 | return 0; | |
624 | ||
625 | out_free: | |
626 | free_memtype(start, end); | |
627 | ret = -EBUSY; | |
628 | out_err: | |
629 | return ret; | |
630 | } | |
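The intended call pattern, sketched with a hypothetical caller (this mirrors what the ioremap machinery does internally): request a type, continue with whatever compatible type comes back through the pointer, and release with io_free_memtype():

```c
/* Sketch only: the function name and usage are illustrative assumptions. */
static int example_io_reserve(resource_size_t start, resource_size_t end)
{
	enum page_cache_mode type = _PAGE_CACHE_MODE_WC;	/* request */
	int ret;

	ret = io_reserve_memtype(start, end, &type);
	if (ret)
		return ret;	/* -EBUSY: no compatible type available */

	/* ... use the region with 'type', possibly demoted from WC ... */

	io_free_memtype(start, end);
	return 0;
}
```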
631 | ||
632 | /** | |
633 | * io_free_memtype - Release a memory type mapping for a region of memory | |
634 | * @start: start (physical address) of the region | |
635 | * @end: end (physical address) of the region | |
636 | */ | |
637 | void io_free_memtype(resource_size_t start, resource_size_t end) | |
638 | { | |
639 | free_memtype(start, end); | |
640 | } | |
641 | ||
f0970c13 | 642 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
643 | unsigned long size, pgprot_t vma_prot) | |
644 | { | |
645 | return vma_prot; | |
646 | } | |
647 | ||
d092633b | 648 | #ifdef CONFIG_STRICT_DEVMEM |
1f40a8bf | 649 | /* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */ |
0124cecf VP | 650 | static inline int range_is_allowed(unsigned long pfn, unsigned long size) |
651 | { | |
652 | return 1; | |
653 | } | |
654 | #else | |
9e41bff2 | 655 | /* This check is needed to avoid cache aliasing when PAT is enabled */ |
0124cecf VP | 656 | static inline int range_is_allowed(unsigned long pfn, unsigned long size) |
657 | { | |
658 | u64 from = ((u64)pfn) << PAGE_SHIFT; | |
659 | u64 to = from + size; | |
660 | u64 cursor = from; | |
661 | ||
cb32edf6 | 662 | if (!pat_enabled()) |
9e41bff2 RT | 663 | return 1; |
664 | ||
0124cecf VP | 665 | while (cursor < to) { |
666 | if (!devmem_is_allowed(pfn)) { | |
9e76561f LR | 667 | pr_info("x86/PAT: Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n", |
668 | current->comm, from, to - 1); | |
0124cecf VP | 669 | return 0; |
670 | } | |
671 | cursor += PAGE_SIZE; | |
672 | pfn++; | |
673 | } | |
674 | return 1; | |
675 | } | |
d092633b | 676 | #endif /* CONFIG_STRICT_DEVMEM */ |
0124cecf | 677 | |
f0970c13 | 678 | int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, |
679 | unsigned long size, pgprot_t *vma_prot) | |
680 | { | |
e00c8cc9 | 681 | enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB; |
f0970c13 | 682 | |
0124cecf VP | 683 | if (!range_is_allowed(pfn, size)) |
684 | return 0; | |
685 | ||
6b2f3d1f | 686 | if (file->f_flags & O_DSYNC) |
e00c8cc9 | 687 | pcm = _PAGE_CACHE_MODE_UC_MINUS; |
f0970c13 | 688 | |
689 | #ifdef CONFIG_X86_32 | |
690 | /* | |
691 | * On the PPro and successors, the MTRRs are used to set | |
692 | * memory types for physical addresses outside main memory, | |
693 | * so blindly setting UC or PWT on those pages is wrong. | |
694 | * For Pentiums and earlier, the surround logic should disable | |
695 | * caching for the high addresses through the KEN pin, but | |
696 | * we maintain the tradition of paranoia in this code. | |
697 | */ | |
cb32edf6 | 698 | if (!pat_enabled() && |
cd7a4e93 AH | 699 | !(boot_cpu_has(X86_FEATURE_MTRR) || |
700 | boot_cpu_has(X86_FEATURE_K6_MTRR) || | |
701 | boot_cpu_has(X86_FEATURE_CYRIX_ARR) || | |
702 | boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) && | |
703 | (pfn << PAGE_SHIFT) >= __pa(high_memory)) { | |
e00c8cc9 | 704 | pcm = _PAGE_CACHE_MODE_UC; |
f0970c13 | 705 | } |
706 | #endif | |
707 | ||
e7f260a2 | 708 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | |
e00c8cc9 | 709 | cachemode2protval(pcm)); |
f0970c13 | 710 | return 1; |
711 | } | |
e7f260a2 | 712 | |
7880f746 VP | 713 | /* |
714 | * Change the memory type for the physical address range in the kernel identity |
715 | * mapping space if that range is part of the identity map. |
716 | */ | |
b14097bd JG | 717 | int kernel_map_sync_memtype(u64 base, unsigned long size, |
718 | enum page_cache_mode pcm) | |
7880f746 VP | 719 | { |
720 | unsigned long id_sz; | |
721 | ||
a25b9316 | 722 | if (base > __pa(high_memory-1)) |
7880f746 VP | 723 | return 0; |
724 | ||
60f583d5 DH | 725 | /* |
726 | * Some areas in the middle of the kernel identity range |
727 | * are not mapped, like the PCI space. |
728 | */ | |
729 | if (!page_is_ram(base >> PAGE_SHIFT)) | |
730 | return 0; | |
731 | ||
a25b9316 | 732 | id_sz = (__pa(high_memory-1) <= base + size) ? |
7880f746 VP | 733 | __pa(high_memory) - base : |
734 | size; | |
735 | ||
b14097bd | 736 | if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) { |
9e76561f | 737 | pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n", |
7880f746 | 738 | current->comm, current->pid, |
e00c8cc9 | 739 | cattr_name(pcm), |
365811d6 | 740 | base, (unsigned long long)(base + size-1)); |
7880f746 VP | 741 | return -EINVAL; |
742 | } | |
743 | return 0; | |
744 | } | |
745 | ||
5899329b | 746 | /* |
747 | * Internal interface to reserve a range of physical memory with prot. | |
748 | * Reserves non-RAM regions only. After a successful reserve_memtype, | |
749 | * this function also keeps the identity mapping (if any) in sync with the new prot. | |
750 | */ | |
cdecff68 | 751 | static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, |
752 | int strict_prot) | |
5899329b | 753 | { |
754 | int is_ram = 0; | |
7880f746 | 755 | int ret; |
e00c8cc9 JG | 756 | enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot); |
757 | enum page_cache_mode pcm = want_pcm; | |
5899329b | 758 | |
be03d9e8 | 759 | is_ram = pat_pagerange_is_ram(paddr, paddr + size); |
5899329b | 760 | |
be03d9e8 | 761 | /* |
d886c73c VP | 762 | * reserve_pfn_range() for RAM pages. We do not refcount to keep |
763 | * track of the number of mappings of RAM pages. We can assert that |
764 | * the type requested matches the type of the first page in the range. |
be03d9e8 | 765 | */ |
d886c73c | 766 | if (is_ram) { |
cb32edf6 | 767 | if (!pat_enabled()) |
d886c73c VP | 768 | return 0; |
769 | ||
e00c8cc9 JG | 770 | pcm = lookup_memtype(paddr); |
771 | if (want_pcm != pcm) { | |
9e76561f | 772 | pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n", |
d886c73c | 773 | current->comm, current->pid, |
e00c8cc9 | 774 | cattr_name(want_pcm), |
d886c73c | 775 | (unsigned long long)paddr, |
365811d6 | 776 | (unsigned long long)(paddr + size - 1), |
e00c8cc9 | 777 | cattr_name(pcm)); |
d886c73c | 778 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & |
e00c8cc9 JG | 779 | (~_PAGE_CACHE_MASK)) | |
780 | cachemode2protval(pcm)); | |
d886c73c | 781 | } |
4bb9c5c0 | 782 | return 0; |
d886c73c | 783 | } |
5899329b | 784 | |
e00c8cc9 | 785 | ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm); |
5899329b | 786 | if (ret) |
787 | return ret; | |
788 | ||
e00c8cc9 | 789 | if (pcm != want_pcm) { |
1adcaafe | 790 | if (strict_prot || |
e00c8cc9 | 791 | !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) { |
cdecff68 | 792 | free_memtype(paddr, paddr + size); |
9e76561f LR | 793 | pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n", |
794 | current->comm, current->pid, | |
795 | cattr_name(want_pcm), | |
796 | (unsigned long long)paddr, | |
797 | (unsigned long long)(paddr + size - 1), | |
798 | cattr_name(pcm)); | |
cdecff68 | 799 | return -EINVAL; |
800 | } | |
801 | /* | |
802 | * We allow returning a different type than the one requested in | |
803 | * the non-strict case. | |
804 | */ | |
805 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & | |
806 | (~_PAGE_CACHE_MASK)) | | |
e00c8cc9 | 807 | cachemode2protval(pcm)); |
5899329b | 808 | } |
809 | ||
e00c8cc9 | 810 | if (kernel_map_sync_memtype(paddr, size, pcm) < 0) { |
5899329b | 811 | free_memtype(paddr, paddr + size); |
5899329b | 812 | return -EINVAL; |
813 | } | |
814 | return 0; | |
815 | } | |
816 | ||
817 | /* | |
818 | * Internal interface to free a range of physical memory. | |
819 | * Frees non RAM regions only. | |
820 | */ | |
821 | static void free_pfn_range(u64 paddr, unsigned long size) | |
822 | { | |
823 | int is_ram; | |
824 | ||
be03d9e8 | 825 | is_ram = pat_pagerange_is_ram(paddr, paddr + size); |
5899329b | 826 | if (is_ram == 0) |
827 | free_memtype(paddr, paddr + size); | |
828 | } | |
829 | ||
830 | /* | |
5180da41 | 831 | * track_pfn_copy is called when a vma covering the pfnmap gets |
5899329b | 832 | * copied through copy_page_range(). |
833 | * | |
834 | * If the vma has a linear pfn mapping for the entire range, we get the prot | |
835 | * from the pte and reserve the entire vma range with a single reserve_pfn_range call. | |
5899329b | 836 | */ |
5180da41 | 837 | int track_pfn_copy(struct vm_area_struct *vma) |
5899329b | 838 | { |
c1c15b65 | 839 | resource_size_t paddr; |
982d789a | 840 | unsigned long prot; |
4b065046 | 841 | unsigned long vma_size = vma->vm_end - vma->vm_start; |
cdecff68 | 842 | pgprot_t pgprot; |
5899329b | 843 | |
b3b9c293 | 844 | if (vma->vm_flags & VM_PAT) { |
5899329b | 845 | /* |
982d789a | 846 | * reserve the whole chunk covered by the vma. We need the |
847 | * starting address and the protection from the pte. | |
5899329b | 848 | */ |
4b065046 | 849 | if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { |
5899329b | 850 | WARN_ON_ONCE(1); |
982d789a | 851 | return -EINVAL; |
5899329b | 852 | } |
cdecff68 | 853 | pgprot = __pgprot(prot); |
854 | return reserve_pfn_range(paddr, vma_size, &pgprot, 1); | |
5899329b | 855 | } |
856 | ||
5899329b | 857 | return 0; |
5899329b | 858 | } |
859 | ||
860 | /* | |
5899329b | 861 | * prot is passed in as a parameter for the new mapping. If the vma has a |
862 | * linear pfn mapping for the entire range, reserve the entire vma range with |
863 | * a single reserve_pfn_range call. |
5899329b | 864 | */ |
5180da41 | 865 | int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, |
b3b9c293 | 866 | unsigned long pfn, unsigned long addr, unsigned long size) |
5899329b | 867 | { |
b1a86e15 | 868 | resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; |
2a374698 | 869 | enum page_cache_mode pcm; |
5899329b | 870 | |
b1a86e15 | 871 | /* reserve the whole chunk starting from paddr */ |
b3b9c293 KK | 872 | if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) { |
873 | int ret; | |
874 | ||
875 | ret = reserve_pfn_range(paddr, size, prot, 0); | |
876 | if (!ret) | |
877 | vma->vm_flags |= VM_PAT; | |
878 | return ret; | |
879 | } | |
5899329b | 880 | |
cb32edf6 | 881 | if (!pat_enabled()) |
10876376 VP | 882 | return 0; |
883 | ||
5180da41 SS | 884 | /* |
885 | * For anything smaller than the vma size we set prot based on the | |
886 | * lookup. | |
887 | */ | |
2a374698 | 888 | pcm = lookup_memtype(paddr); |
5180da41 SS | 889 | |
890 | /* Check memtype for the remaining pages */ | |
891 | while (size > PAGE_SIZE) { | |
892 | size -= PAGE_SIZE; | |
893 | paddr += PAGE_SIZE; | |
2a374698 | 894 | if (pcm != lookup_memtype(paddr)) |
5180da41 SS | 895 | return -EINVAL; |
896 | } | |
897 | ||
898 | *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | | |
2a374698 | 899 | cachemode2protval(pcm)); |
5180da41 SS | 900 | |
901 | return 0; | |
902 | } | |
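Drivers do not call these hooks directly; they run underneath remap_pfn_range(). A hedged sketch of a driver mmap handler (the function name and pfn are hypothetical) that maps the whole vma, and therefore reaches track_pfn_remap() above with addr == vma->vm_start and the full vma size, taking the VM_PAT reservation path:

```c
#include <linux/mm.h>

/* Sketch only, not from this file: a whole-vma write-combined mapping. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = 0xfd000UL;	/* hypothetical MMIO pfn */

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	/* remap_pfn_range() -> track_pfn_remap() reserves the memtype */
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
```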
903 | ||
904 | int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, | |
905 | unsigned long pfn) | |
906 | { | |
2a374698 | 907 | enum page_cache_mode pcm; |
5180da41 | 908 | |
cb32edf6 | 909 | if (!pat_enabled()) |
5180da41 SS | 910 | return 0; |
911 | ||
912 | /* Set prot based on lookup */ | |
2a374698 | 913 | pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT); |
10876376 | 914 | *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | |
2a374698 | 915 | cachemode2protval(pcm)); |
10876376 | 916 | |
5899329b | 917 | return 0; |
5899329b | 918 | } |
919 | ||
920 | /* | |
5180da41 | 921 | * untrack_pfn is called while unmapping a pfnmap for a region. |
5899329b | 922 | * untrack can be called for a specific region indicated by pfn and size, or |
b1a86e15 | 923 | * for the entire vma (in which case pfn and size are zero). |
5899329b | 924 | */ |
5180da41 SS | 925 | void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, |
926 | unsigned long size) | |
5899329b | 927 | { |
c1c15b65 | 928 | resource_size_t paddr; |
b1a86e15 | 929 | unsigned long prot; |
5899329b | 930 | |
b3b9c293 | 931 | if (!(vma->vm_flags & VM_PAT)) |
5899329b | 932 | return; |
b1a86e15 SS | 933 | |
934 | /* free the chunk starting from pfn or the whole chunk */ | |
935 | paddr = (resource_size_t)pfn << PAGE_SHIFT; | |
936 | if (!paddr && !size) { | |
937 | if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { | |
938 | WARN_ON_ONCE(1); | |
939 | return; | |
940 | } | |
941 | ||
942 | size = vma->vm_end - vma->vm_start; | |
5899329b | 943 | } |
b1a86e15 | 944 | free_pfn_range(paddr, size); |
b3b9c293 | 945 | vma->vm_flags &= ~VM_PAT; |
5899329b | 946 | } |
947 | ||
2520bd31 | 948 | pgprot_t pgprot_writecombine(pgprot_t prot) |
949 | { | |
cb32edf6 | 950 | if (pat_enabled()) |
e00c8cc9 JG | 951 | return __pgprot(pgprot_val(prot) | |
952 | cachemode2protval(_PAGE_CACHE_MODE_WC)); | |
2520bd31 | 953 | else |
954 | return pgprot_noncached(prot); | |
955 | } | |
92b9af9e | 956 | EXPORT_SYMBOL_GPL(pgprot_writecombine); |
2520bd31 | 957 | |
012f09e7 | 958 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) |
fec0962e | 959 | |
fec0962e | 960 | static struct memtype *memtype_get_idx(loff_t pos) |
961 | { | |
be5a0c12 | 962 | struct memtype *print_entry; |
963 | int ret; | |
fec0962e | 964 | |
be5a0c12 | 965 | print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL); |
fec0962e | 966 | if (!print_entry) |
967 | return NULL; | |
968 | ||
969 | spin_lock(&memtype_lock); | |
9e41a49a | 970 | ret = rbt_memtype_copy_nth_element(print_entry, pos); |
fec0962e | 971 | spin_unlock(&memtype_lock); |
ad2cde16 | 972 | |
be5a0c12 | 973 | if (!ret) { |
974 | return print_entry; | |
975 | } else { | |
976 | kfree(print_entry); | |
977 | return NULL; | |
978 | } | |
fec0962e | 979 | } |
980 | ||
981 | static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) | |
982 | { | |
983 | if (*pos == 0) { | |
984 | ++*pos; | |
3736708f | 985 | seq_puts(seq, "PAT memtype list:\n"); |
fec0962e | 986 | } |
987 | ||
988 | return memtype_get_idx(*pos); | |
989 | } | |
990 | ||
991 | static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
992 | { | |
993 | ++*pos; | |
994 | return memtype_get_idx(*pos); | |
995 | } | |
996 | ||
997 | static void memtype_seq_stop(struct seq_file *seq, void *v) | |
998 | { | |
999 | } | |
1000 | ||
1001 | static int memtype_seq_show(struct seq_file *seq, void *v) | |
1002 | { | |
1003 | struct memtype *print_entry = (struct memtype *)v; | |
1004 | ||
1005 | seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type), | |
1006 | print_entry->start, print_entry->end); | |
1007 | kfree(print_entry); | |
ad2cde16 | 1008 | |
fec0962e | 1009 | return 0; |
1010 | } | |
1011 | ||
d535e431 | 1012 | static const struct seq_operations memtype_seq_ops = { |
fec0962e | 1013 | .start = memtype_seq_start, |
1014 | .next = memtype_seq_next, | |
1015 | .stop = memtype_seq_stop, | |
1016 | .show = memtype_seq_show, | |
1017 | }; | |
1018 | ||
1019 | static int memtype_seq_open(struct inode *inode, struct file *file) | |
1020 | { | |
1021 | return seq_open(file, &memtype_seq_ops); | |
1022 | } | |
1023 | ||
1024 | static const struct file_operations memtype_fops = { | |
1025 | .open = memtype_seq_open, | |
1026 | .read = seq_read, | |
1027 | .llseek = seq_lseek, | |
1028 | .release = seq_release, | |
1029 | }; | |
1030 | ||
1031 | static int __init pat_memtype_list_init(void) | |
1032 | { | |
cb32edf6 | 1033 | if (pat_enabled()) { |
dd4377b0 XF | 1034 | debugfs_create_file("pat_memtype_list", S_IRUSR, |
1035 | arch_debugfs_dir, NULL, &memtype_fops); | |
1036 | } | |
fec0962e | 1037 | return 0; |
1038 | } | |
1039 | ||
1040 | late_initcall(pat_memtype_list_init); | |
1041 | ||
012f09e7 | 1042 | #endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */ |