// SPDX-License-Identifier: GPL-2.0-only
/*
 * Page Attribute Table (PAT) support: handle memory caching attributes in page tables.
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 *
 * Basic principles:
 *
 * PAT is a CPU feature supported by all modern x86 CPUs, to allow the firmware and
 * the kernel to set one of a handful of 'caching type' attributes for physical
 * memory ranges: uncached, write-combining, write-through, write-protected,
 * and the most commonly used and default attribute: write-back caching.
 *
 * PAT support supersedes and augments MTRR support in a compatible fashion: MTRR is
 * a hardware interface to enumerate a limited number of physical memory ranges
 * and set their caching attributes explicitly, programmed into the CPU via MSRs.
 * Even modern CPUs have MTRRs enabled - but these are typically not touched
 * by the kernel or by user-space (such as the X server); we rely on PAT for any
 * additional cache attribute logic.
 *
 * PAT doesn't work via explicit memory ranges, but uses page table entries to add
 * cache attribute information to the mapped memory range: there are 3 bits used
 * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT), with the 8 possible values mapped by the
 * CPU to actual cache attributes via an MSR loaded into the CPU (MSR_IA32_CR_PAT).
 *
 * ( There's a metric ton of finer details, such as compatibility with CPU quirks
 *   that only support 4 types of PAT entries, and interaction with MTRRs, see
 *   below for details. )
 */
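
/*
 * Illustrative sketch (an addition, not part of the original file): the PAT
 * slot index selected by a PTE is formed from the three cache-attribute bits
 * as (PAT << 2) | (PCD << 1) | PWT, and the byte at that index in
 * MSR_IA32_CR_PAT supplies the effective memory type. A hypothetical helper
 * making that explicit:
 *
 *	static inline unsigned int pte_pat_slot(unsigned long pte_flags)
 *	{
 *		unsigned int slot = 0;
 *
 *		slot |= !!(pte_flags & _PAGE_PWT) << 0;
 *		slot |= !!(pte_flags & _PAGE_PCD) << 1;
 *		slot |= !!(pte_flags & _PAGE_PAT) << 2;
 *
 *		return slot;	(indexes byte 'slot' of the PAT MSR)
 *	}
 */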

#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/debugfs.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/cacheinfo.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/fcntl.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/memtype.h>
#include <asm/io.h>

#include "memtype.h"
#include "../mm_internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "" fmt

static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
static u64 __ro_after_init pat_msr_val;

/*
 * PAT support is enabled by default, but can be disabled for
 * various user-requested or hardware-forced reasons:
 */
static void __init pat_disable(const char *msg_reason)
{
	if (pat_disabled)
		return;

	pat_disabled = true;
	pr_info("x86/PAT: %s\n", msg_reason);

	memory_caching_control &= ~CACHE_PAT;
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled via boot option.");
	return 0;
}
early_param("nopat", nopat);

bool pat_enabled(void)
{
	return !pat_disabled;
}
EXPORT_SYMBOL_GPL(pat_enabled);

int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 1;
}
__setup("debugpat", pat_debug_setup);

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses page flags arch_1 and uncached together to keep track of
 * the memory type of pages that have a backing struct page.
 *
 * X86 PAT supports 4 different memory types:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_WT
 *
 * _PAGE_CACHE_MODE_WB is the default type.
 */

#define _PGMT_WB		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WT		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_WB)
		return _PAGE_CACHE_MODE_WB;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_MODE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_MODE_UC_MINUS;
	else
		return _PAGE_CACHE_MODE_WT;
}

static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
	unsigned long memtype_flags;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_MODE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_MODE_WT:
		memtype_flags = _PGMT_WT;
		break;
	case _PAGE_CACHE_MODE_WB:
	default:
		memtype_flags = _PGMT_WB;
		break;
	}

	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
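
/*
 * Illustrative note (an addition, not from the original source): the
 * cmpxchg() loop in set_page_memtype() is the classic lock-free
 * read-modify-write pattern - only the two tracking bits are replaced, and
 * a concurrent update to any other page flag simply causes a retry. A
 * round-trip, assuming no concurrent modification, looks like:
 *
 *	set_page_memtype(page, _PAGE_CACHE_MODE_WC);
 *	WARN_ON(get_page_memtype(page) != _PAGE_CACHE_MODE_WC);
 */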
#else
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	return -1;
}
static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
}
#endif

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define CM(c) (_PAGE_CACHE_MODE_ ## c)

static enum page_cache_mode __init pat_get_cache_mode(unsigned int pat_val,
						      char *msg)
{
	enum page_cache_mode cache;
	char *cache_mode;

	switch (pat_val) {
	case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
	case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
	case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
	case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
	case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
	case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
	default:           cache = CM(WB);       cache_mode = "WB  "; break;
	}

	memcpy(msg, cache_mode, 4);

	return cache;
}

#undef CM

/*
 * Update the cache-mode-to-pgprot translation tables according to the PAT
 * configuration.
 * Using lower indices is preferred, so we start with the highest index.
 */
static void __init init_cache_modes(u64 pat)
{
	enum page_cache_mode cache;
	char pat_msg[33];
	int i;

	pat_msg[32] = 0;
	for (i = 7; i >= 0; i--) {
		cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
					   pat_msg + 4 * i);
		update_cache_mode_entry(i, cache);
	}
	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
}

void pat_cpu_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * If this happens we are on a secondary CPU, but switched to
		 * PAT on the boot CPU. We have no way to undo PAT.
		 */
		panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
	}

	wrmsrl(MSR_IA32_CR_PAT, pat_msr_val);
}

/**
 * pat_bp_init - Initialize the PAT MSR value and PAT table
 *
 * This function initializes the PAT MSR value and the PAT table with an
 * OS-defined value to enable the additional cache attributes WC, WT and WP.
 *
 * This function prepares the calls of pat_cpu_init() via cache_cpu_init()
 * on all CPUs.
 */
void __init pat_bp_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
#define PAT(p0, p1, p2, p3, p4, p5, p6, p7)			\
	(((u64)PAT_ ## p0) | ((u64)PAT_ ## p1 << 8) |		\
	((u64)PAT_ ## p2 << 16) | ((u64)PAT_ ## p3 << 24) |	\
	((u64)PAT_ ## p4 << 32) | ((u64)PAT_ ## p5 << 40) |	\
	((u64)PAT_ ## p6 << 48) | ((u64)PAT_ ## p7 << 56))
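
	/*
	 * Worked example (an illustrative addition): PAT(WB, WT, UC_MINUS,
	 * UC, WB, WT, UC_MINUS, UC) places the type codes 6, 4, 7, 0 into
	 * bytes 0-3 and again into bytes 4-7, i.e. it expands to
	 * 0x0007040600070406 - the architectural power-on default of
	 * MSR_IA32_CR_PAT.
	 */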

	if (!IS_ENABLED(CONFIG_X86_PAT))
		pr_info_once("x86/PAT: PAT support disabled because CONFIG_X86_PAT is disabled in the kernel.\n");

	if (!cpu_feature_enabled(X86_FEATURE_PAT))
		pat_disable("PAT not supported by the CPU.");
	else
		rdmsrl(MSR_IA32_CR_PAT, pat_msr_val);

	if (!pat_msr_val) {
		pat_disable("PAT support disabled by the firmware.");

		/*
		 * No PAT. Emulate the PAT table that corresponds to the two
		 * cache bits, PWT (Write Through) and PCD (Cache Disable).
		 * This setup is also the same as the BIOS default setup.
		 *
		 * PTE encoding:
		 *
		 *       PCD
		 *       |PWT  PAT
		 *       ||    slot
		 *       00    0    WB : _PAGE_CACHE_MODE_WB
		 *       01    1    WT : _PAGE_CACHE_MODE_WT
		 *       10    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *       11    3    UC : _PAGE_CACHE_MODE_UC
		 *
		 * NOTE: When WC or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat_msr_val = PAT(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC);
	}

	/*
	 * Xen PV doesn't allow setting the PAT MSR, but all cache modes are
	 * supported.
	 * When running as a TDX guest, setting the PAT MSR won't work either,
	 * due to the requirement to set CR0.CD when doing so. Rely on the
	 * firmware to have set the PAT MSR correctly.
	 */
	if (pat_disabled ||
	    cpu_feature_enabled(X86_FEATURE_XENPV) ||
	    cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
		init_cache_modes(pat_msr_val);
		return;
	}

	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
	     ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
		/*
		 * PAT support with the lower four entries. Intel Pentium 2,
		 * 3, M, and 4 are affected by PAT errata, which makes the
		 * upper four entries unusable. To be on the safe side, we
		 * don't use those.
		 *
		 * PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *      PAT bit unused
		 *
		 * NOTE: When WT or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat_msr_val = PAT(WB, WC, UC_MINUS, UC, WB, WC, UC_MINUS, UC);
	} else {
		/*
		 * Full PAT support. We put WT in slot 7 to improve
		 * robustness in the presence of errata that might cause
		 * the high PAT bit to be ignored. This way, a buggy slot 7
		 * access will hit slot 3, and slot 3 is UC, so at worst
		 * we lose performance without causing a correctness issue.
		 * Pentium 4 erratum N46 is an example of such an erratum,
		 * although we try not to use PAT at all on affected CPUs.
		 *
		 * PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *      100    4    WB : Reserved
		 *      101    5    WP : _PAGE_CACHE_MODE_WP
		 *      110    6    UC-: Reserved
		 *      111    7    WT : _PAGE_CACHE_MODE_WT
		 *
		 * The reserved slots are unused, but mapped to their
		 * corresponding types in the presence of PAT errata.
		 */
		pat_msr_val = PAT(WB, WC, UC_MINUS, UC, WB, WP, UC_MINUS, WT);
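		/*
		 * Descriptive note (an addition): this expands to
		 * 0x0407050600070106, i.e. slots 0-7 hold
		 * WB WC UC- UC WB WP UC- WT.
		 */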
	}

	memory_caching_control |= CACHE_PAT;

	init_cache_modes(pat_msr_val);
#undef PAT
}

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Compute the intersection of the PAT memory type and the MTRR memory type,
 * and return the resulting memory type as PAT understands it.
 * (The type encodings used by PAT and by the MTRRs are not the same.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM, Vol. 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
				     enum page_cache_mode req_type)
{
	/*
	 * Look for the MTRR hint to get the effective type in case where the
	 * PAT request is for WB.
	 */
	if (req_type == _PAGE_CACHE_MODE_WB) {
		u8 mtrr_type, uniform;

		mtrr_type = mtrr_type_lookup(start, end, &uniform);
		if (mtrr_type != MTRR_TYPE_WRBACK &&
		    mtrr_type != MTRR_TYPE_INVALID)
			return _PAGE_CACHE_MODE_UC_MINUS;

		return _PAGE_CACHE_MODE_WB;
	}

	return req_type;
}
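
/*
 * Example (an illustrative addition): a WB request for a range that an MTRR
 * marks as write-combining or uncached is demoted to UC-, while a WB request
 * over a WB MTRR (or with no valid MTRR lookup result) stays WB. Non-WB
 * requests are returned unchanged.
 */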
399 | ||
fa83523f JD |
400 | struct pagerange_state { |
401 | unsigned long cur_pfn; | |
402 | int ram; | |
403 | int not_ram; | |
404 | }; | |
405 | ||
406 | static int | |
407 | pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg) | |
408 | { | |
409 | struct pagerange_state *state = arg; | |
410 | ||
411 | state->not_ram |= initial_pfn > state->cur_pfn; | |
412 | state->ram |= total_nr_pages > 0; | |
413 | state->cur_pfn = initial_pfn + total_nr_pages; | |
414 | ||
415 | return state->ram && state->not_ram; | |
416 | } | |
417 | ||
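/*
 * Descriptive note (an addition): the walk below classifies the range. A
 * return of 1 means the range is treated as RAM, 0 means none of it is RAM,
 * and -1 flags a mix of RAM and non-RAM pages, which callers treat as an
 * error.
 */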
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, the physical address range in the legacy ISA
	 * region is tracked as non-RAM. This allows users of /dev/mem to map
	 * portions of the legacy ISA region, even when some of those portions
	 * are listed (or not even listed) with different e820 types
	 * (RAM/reserved/..).
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}

/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * type. The page flags are limited to four types, WB (default), WC, WT and
 * UC-. A WP request fails with -EINVAL, and UC gets redirected to UC-.
 * Setting a new memory type is only allowed for a page mapped with the
 * default WB type.
 *
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end,
				  enum page_cache_mode req_type,
				  enum page_cache_mode *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_MODE_WP) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_UC_MINUS;
		return -EINVAL;
	}

	if (req_type == _PAGE_CACHE_MODE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_MODE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		enum page_cache_mode type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != _PAGE_CACHE_MODE_WB) {
			pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
				start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, _PAGE_CACHE_MODE_WB);
	}
	return 0;
}

static u64 sanitize_phys(u64 address)
{
	/*
	 * When changing the memtype for pages containing poison allow
	 * for a "decoy" virtual address (bit 63 clear) passed to
	 * set_memory_X(). __pa() on a "decoy" address results in a
	 * physical address with bit 63 set.
	 *
	 * Decoy addresses are not present for 32-bit builds, see
	 * set_mce_nospec().
	 */
	if (IS_ENABLED(CONFIG_X86_64))
		return address & __PHYSICAL_MASK;
	return address;
}
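
/*
 * Example (an illustrative addition): on x86-64, a decoy address such as
 * 0x8000000000001000 sanitizes to 0x1000 - bit 63, and any other bits above
 * the physical address width, are cleared by __PHYSICAL_MASK.
 */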

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 * - _PAGE_CACHE_MODE_WT
 *
 * If new_type is NULL, the function returns an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function returns
 * the available type in new_type in case of no error. In case of any error
 * it returns a negative value.
 */
int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
		    enum page_cache_mode *new_type)
{
	struct memtype *entry_new;
	enum page_cache_mode actual_type;
	int is_range_ram;
	int err = 0;

	start = sanitize_phys(start);

	/*
	 * The end address passed into this function is exclusive, but
	 * sanitize_phys() expects an inclusive address.
	 */
	end = sanitize_phys(end - 1) + 1;
	if (start >= end) {
		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
			start, end - 1, cattr_name(req_type));
		return -EINVAL;
	}

	if (!pat_enabled()) {
		/* This is identical to page table setting without PAT */
		if (new_type)
			*new_type = req_type;
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = reserve_ram_pages_type(start, end, req_type, new_type);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	entry_new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!entry_new)
		return -ENOMEM;

	entry_new->start = start;
	entry_new->end	 = end;
	entry_new->type	 = actual_type;

	spin_lock(&memtype_lock);

	err = memtype_check_insert(entry_new, new_type);
	if (err) {
		pr_info("x86/PAT: memtype_reserve failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
			start, end - 1,
			cattr_name(entry_new->type), cattr_name(req_type));
		kfree(entry_new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("memtype_reserve added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(entry_new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

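/*
 * Usage sketch (an illustrative addition; error handling is trimmed, and
 * the BAR address and size are hypothetical): a caller wanting
 * write-combining access to an MMIO region pairs memtype_reserve() with
 * memtype_free():
 *
 *	enum page_cache_mode new_type;
 *	u64 bar = 0xfd000000, size = 0x100000;
 *
 *	if (!memtype_reserve(bar, bar + size, _PAGE_CACHE_MODE_WC, &new_type)) {
 *		... map and use the region with new_type ...
 *		memtype_free(bar, bar + size);
 *	}
 */
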
int memtype_free(u64 start, u64 end)
{
	int is_range_ram;
	struct memtype *entry_old;

	if (!pat_enabled())
		return 0;

	start = sanitize_phys(start);
	end = sanitize_phys(end);

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1)
		return free_ram_pages_type(start, end);
	if (is_range_ram < 0)
		return -EINVAL;

	spin_lock(&memtype_lock);
	entry_old = memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (IS_ERR(entry_old)) {
		pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry_old);

	dprintk("memtype_free request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address whose memory type needs to be looked up
 *
 * Only to be called when PAT is enabled.
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_WT.
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
{
	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;

		page = pfn_to_page(paddr >> PAGE_SHIFT);
		return get_page_memtype(page);
	}

	spin_lock(&memtype_lock);

	entry = memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_MODE_UC_MINUS;

	spin_unlock(&memtype_lock);

	return rettype;
}

/**
 * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type
 * of @pfn cannot be overridden by UC MTRR memory type.
 *
 * Only to be called when PAT is enabled.
 *
 * Returns true, if the PAT memory type of @pfn is UC, UC-, or WC.
 * Returns false in other cases.
 */
bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn)
{
	enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn));

	return cm == _PAGE_CACHE_MODE_UC ||
	       cm == _PAGE_CACHE_MODE_UC_MINUS ||
	       cm == _PAGE_CACHE_MODE_WC;
}
EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr);

/**
 * memtype_reserve_io - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with the requested type. On success, the
 * requested type or any other compatible type that was available for the
 * region is returned.
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int memtype_reserve_io(resource_size_t start, resource_size_t end,
			enum page_cache_mode *type)
{
	resource_size_t size = end - start;
	enum page_cache_mode req_type = *type;
	enum page_cache_mode new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = memtype_reserve(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (memtype_kernel_map_sync(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	memtype_free(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * memtype_free_io - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void memtype_free_io(resource_size_t start, resource_size_t end)
{
	memtype_free(start, end);
}

#ifdef CONFIG_X86_PAT
int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{
	enum page_cache_mode type = _PAGE_CACHE_MODE_WC;

	return memtype_reserve_io(start, start + size, &type);
}
EXPORT_SYMBOL(arch_io_reserve_memtype_wc);

void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
{
	memtype_free_io(start, start + size);
}
EXPORT_SYMBOL(arch_io_free_memtype_wc);
#endif

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
		vma_prot = pgprot_decrypted(vma_prot);

	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled())
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		pcm = _PAGE_CACHE_MODE_UC_MINUS;

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     cachemode2protval(pcm));
	return 1;
}
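
/*
 * Example (an illustrative addition): a user-space process that opens
 * /dev/mem with O_DSYNC and then mmap()s a range gets an UC- mapping
 * instead of the default WB one.
 */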

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is part of the identity map.
 */
int memtype_kernel_map_sync(u64 base, unsigned long size,
			    enum page_cache_mode pcm)
{
	unsigned long id_sz;

	if (base > __pa(high_memory-1))
		return 0;

	/*
	 * Some areas in the middle of the kernel identity range
	 * are not mapped, for example the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))
		return 0;

	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base : size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(pcm),
			base, (unsigned long long)(base + size-1));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only, and after a successful memtype_reserve(),
 * this function also keeps the identity mapping (if any) in sync with the
 * new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
	enum page_cache_mode pcm = want_pcm;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of the number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of the first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled())
			return 0;

		pcm = lookup_memtype(paddr);
		if (want_pcm != pcm) {
			pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					     (~_PAGE_CACHE_MASK)) |
					     cachemode2protval(pcm));
		}
		return 0;
	}

	ret = memtype_reserve(paddr, paddr + size, want_pcm, &pcm);
	if (ret)
		return ret;

	if (pcm != want_pcm) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
			memtype_free(paddr, paddr + size);
			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
			       current->comm, current->pid,
			       cattr_name(want_pcm),
			       (unsigned long long)paddr,
			       (unsigned long long)(paddr + size - 1),
			       cattr_name(pcm));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested
		 * in the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     cachemode2protval(pcm));
	}

	if (memtype_kernel_map_sync(paddr, size, pcm) < 0) {
		memtype_free(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		memtype_free(paddr, paddr + size);
}

/*
 * track_pfn_copy is called when the vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by the vma. We need the
		 * starting address and protection from the pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * prot is passed in as a parameter for the new mapping. If the vma has
 * a linear pfn mapping for the entire range, or no vma is provided,
 * reserve the entire pfn + size range with a single reserve_pfn_range()
 * call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	enum page_cache_mode pcm;

	/* reserve the whole chunk starting from paddr */
	if (!vma || (addr == vma->vm_start
				&& size == (vma->vm_end - vma->vm_start))) {
		int ret;

		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (ret == 0 && vma)
			vm_flags_set(vma, VM_PAT);
		return ret;
	}

	if (!pat_enabled())
		return 0;

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	pcm = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (pcm != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
{
	enum page_cache_mode pcm;

	if (!pat_enabled())
		return;

	/* Set prot based on lookup */
	pcm = lookup_memtype(pfn_t_to_phys(pfn));
	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size,
 * or for the entire vma (in which case pfn and size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;
	unsigned long prot;

	if (vma && !(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return;
		}

		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	if (vma)
		vm_flags_clear(vma, VM_PAT);
}

/*
 * untrack_pfn_moved is called while mremapping a pfnmap for a new region,
 * with the old vma after its pfnmap page table has been removed. The new
 * vma has a new pfnmap to the same pfn & cache type with VM_PAT set.
 */
void untrack_pfn_moved(struct vm_area_struct *vma)
{
	vm_flags_clear(vma, VM_PAT);
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

pgprot_t pgprot_writethrough(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/*
 * We are allocating a temporary printout-entry to be passed
 * between seq_start()/next() and seq_show():
 */
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *entry_print;
	int ret;

	entry_print = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!entry_print)
		return NULL;

	spin_lock(&memtype_lock);
	ret = memtype_copy_nth_element(entry_print, pos);
	spin_unlock(&memtype_lock);

	/* Free it on error: */
	if (ret) {
		kfree(entry_print);
		return NULL;
	}

	return entry_print;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_puts(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	kfree(v);
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
	kfree(v);
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *entry_print = (struct memtype *)v;

	seq_printf(seq, "PAT: [mem 0x%016Lx-0x%016Lx] %s\n",
			entry_print->start,
			entry_print->end,
			cattr_name(entry_print->type));

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	if (pat_enabled()) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}
late_initcall(pat_memtype_list_init);
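
/*
 * Usage note (an illustrative addition): with debugfs mounted, the currently
 * tracked reservations can be inspected from user space via
 * /sys/kernel/debug/x86/pat_memtype_list.
 */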

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */