/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/bootmem.h>

#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pat.h>
#include <asm/e820.h>
#include <asm/cacheflush.h>
#include <asm/fcntl.h>
#include <asm/mtrr.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

void __cpuinit pat_disable(char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#endif
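
/*
 * Usage note: booting with "nopat" on the kernel command line runs the
 * handler above early in setup, disabling PAT before it is ever
 * programmed into the MSR.
 */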

static int debug_enable;
static int __init pat_debug_setup(char *str)
{
	debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)

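/*
 * Usage note: "debugpat" on the command line turns every dprintk() below
 * into a KERN_INFO message, e.g. the "Overlap at ..." traces in
 * reserve_memtype().
 */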

static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC		= 0,	/* uncached */
	PAT_WC		= 1,	/* Write combining */
	PAT_WT		= 4,	/* Write Through */
	PAT_WP		= 5,	/* Write Protected */
	PAT_WB		= 6,	/* Write Back (default) */
	PAT_UC_MINUS	= 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

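/*
 * Worked example: PAT(2, UC_MINUS) evaluates to (u64)7 << 16, placing the
 * UC- encoding in PAT entry 2, i.e. byte 2 of the IA32_PAT MSR.
 */
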
void pat_init(void)
{
	u64 pat;

	if (!pat_enabled)
		return;

	/* Paranoia check. */
	if (!cpu_has_pat && boot_pat_state) {
		/*
		 * If this happens we are on a secondary CPU, but the
		 * boot CPU was already switched to PAT. We have no way
		 * to undo PAT.
		 */
		printk(KERN_ERR "PAT enabled, "
			"but not supported by secondary CPU\n");
		BUG();
	}

	/*
	 * Program the PAT MSR so that a PTE with only PWT set selects
	 * Write-Combining; the other encodings keep their usual meaning.
	 *
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * The PAT bit is unused: entries 4-7 mirror entries 0-3.
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);
	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT
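/*
 * For reference: with the layout chosen in pat_init(), the value written
 * to MSR_IA32_CR_PAT works out to 0x0007010600070106 (bytes 0-7 are
 * WB, WC, UC-, UC, WB, WC, UC-, UC).
 */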

static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}

/*
 * The global memtype list keeps track of the memory type for specific
 * physical memory areas. Conflicting memory types in different mappings
 * can cause CPU cache corruption, so we track every reservation to
 * avoid such aliasing.
 *
 * The list is sorted by starting address and can contain multiple
 * entries for the same address (this allows reference counting of
 * overlapping areas). All aliases of an address carry the same cache
 * attributes, of course. Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this ever becomes a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */

struct memtype {
	u64			start;
	u64			end;
	unsigned long		type;
	struct list_head	nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
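/*
 * Illustration (addresses hypothetical): two UC- reservations of
 * [0xd0000000, 0xd0010000) coexist as two identical list entries;
 * freeing one leaves the other in place, which is how overlapping
 * areas are effectively reference counted.
 */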

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the effective memory type as PAT understands it.
 * (PAT and MTRR encode the types with different numeric values.)
 * The intersection follows the "Effective Memory Type" tables in the
 * IA-32 SDM, vol. 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for an MTRR hint to get the effective type in case where
	 * the PAT request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type == MTRR_TYPE_UNCACHABLE)
			return _PAGE_CACHE_UC;
		if (mtrr_type == MTRR_TYPE_WRCOMB)
			return _PAGE_CACHE_WC;
	}

	return req_type;
}
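
/*
 * Example: a _PAGE_CACHE_WB request over a range the MTRRs mark
 * MTRR_TYPE_UNCACHABLE is downgraded to _PAGE_CACHE_UC, matching the
 * SDM's effective-type table; any non-WB request passes through as-is.
 */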

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type can have the special value -1 when the requester wants to
 * inherit the memory type from the MTRR (if WB) or from an existing PAT
 * reservation, defaulting to UC_MINUS.
 *
 * If ret_type is NULL, the function returns an error if it cannot
 * reserve the region with req_type. If ret_type is non-NULL, the
 * function returns the available type in *ret_type on success. On any
 * error it returns a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *ret_type)
{
	struct memtype *new_entry = NULL;
	struct memtype *parse;
	unsigned long actual_type;
	int err = 0;

	/* Only track when pat_enabled */
	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (ret_type) {
			if (req_type == -1)
				*ret_type = _PAGE_CACHE_WB;
			else
				*ret_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (is_ISA_range(start, end - 1)) {
		if (ret_type)
			*ret_type = _PAGE_CACHE_WB;

		return 0;
	}

	if (req_type == -1) {
		/*
		 * Call mtrr_lookup to get the type hint. This is an
		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
		 * tools and ACPI tools). Use WB request for WB memory and use
		 * UC_MINUS otherwise.
		 */
		u8 mtrr_type = mtrr_type_lookup(start, end);

		if (mtrr_type == MTRR_TYPE_WRBACK) {
			req_type = _PAGE_CACHE_WB;
			actual_type = _PAGE_CACHE_WB;
		} else {
			req_type = _PAGE_CACHE_UC_MINUS;
			actual_type = _PAGE_CACHE_UC_MINUS;
		}
	} else {
		req_type &= _PAGE_CACHE_MASK;
		actual_type = pat_x_mtrr_type(start, end, req_type);
	}

	new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	new_entry->start = start;
	new_entry->end = end;
	new_entry->type = actual_type;

	if (ret_type)
		*ret_type = actual_type;

	spin_lock(&memtype_lock);

	/* Search for existing mapping that overlaps the current range */
	list_for_each_entry(parse, &memtype_list, nd) {
		struct memtype *saved_ptr;

		/* New range ends before this entry: insert in front of it */
		if (parse->start >= end) {
			dprintk("New Entry\n");
			list_add(&new_entry->nd, parse->nd.prev);
			new_entry = NULL;
			break;
		}

		/* New range overlaps the start of this entry */
		if (start <= parse->start && end >= parse->start) {
			if (actual_type != parse->type && ret_type) {
				actual_type = parse->type;
				*ret_type = actual_type;
				new_entry->type = actual_type;
			}

			if (actual_type != parse->type) {
				printk(KERN_INFO
				       "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
				       current->comm, current->pid,
				       start, end,
				       cattr_name(actual_type),
				       cattr_name(parse->type));
				err = -EBUSY;
				break;
			}

			saved_ptr = parse;
			/*
			 * Check to see whether the request overlaps more
			 * than one entry in the list
			 */
			list_for_each_entry_continue(parse, &memtype_list, nd) {
				if (end <= parse->start)
					break;

				if (actual_type != parse->type) {
					printk(KERN_INFO
					       "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
					       current->comm, current->pid,
					       start, end,
					       cattr_name(actual_type),
					       cattr_name(parse->type));
					err = -EBUSY;
					break;
				}
			}

			if (err)
				break;

			dprintk("Overlap at 0x%Lx-0x%Lx\n",
				saved_ptr->start, saved_ptr->end);
			/* No conflict. Go ahead and add this new entry */
			list_add(&new_entry->nd, saved_ptr->nd.prev);
			new_entry = NULL;
			break;
		}

		/* New range starts inside this entry */
		if (start < parse->end) {
			if (actual_type != parse->type && ret_type) {
				actual_type = parse->type;
				*ret_type = actual_type;
				new_entry->type = actual_type;
			}

			if (actual_type != parse->type) {
				printk(KERN_INFO
				       "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
				       current->comm, current->pid,
				       start, end,
				       cattr_name(actual_type),
				       cattr_name(parse->type));
				err = -EBUSY;
				break;
			}

			saved_ptr = parse;
			/*
			 * Check to see whether the request overlaps more
			 * than one entry in the list
			 */
			list_for_each_entry_continue(parse, &memtype_list, nd) {
				if (end <= parse->start)
					break;

				if (actual_type != parse->type) {
					printk(KERN_INFO
					       "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
					       current->comm, current->pid,
					       start, end,
					       cattr_name(actual_type),
					       cattr_name(parse->type));
					err = -EBUSY;
					break;
				}
			}

			if (err)
				break;

			dprintk("Overlap at 0x%Lx-0x%Lx\n",
				saved_ptr->start, saved_ptr->end);
			/* No conflict. Go ahead and add this new entry */
			list_add(&new_entry->nd, &saved_ptr->nd);
			new_entry = NULL;
			break;
		}
	}

	if (err) {
		printk(KERN_INFO
		       "reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n",
		       start, end, cattr_name(new_entry->type),
		       cattr_name(req_type));
		kfree(new_entry);
		spin_unlock(&memtype_lock);
		return err;
	}

	if (new_entry) {
		/* No conflict. Not yet added to the list. Add to the tail */
		list_add_tail(&new_entry->nd, &memtype_list);
		dprintk("New Entry\n");
	}

	if (ret_type) {
		dprintk(
		"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
			start, end, cattr_name(actual_type),
			cattr_name(req_type), cattr_name(*ret_type));
	} else {
		dprintk(
		"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
			start, end, cattr_name(actual_type),
			cattr_name(req_type));
	}

	spin_unlock(&memtype_lock);
	return err;
}
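
/*
 * A minimal usage sketch (mirrors map_devmem()/unmap_devmem() below;
 * the address and size are hypothetical):
 *
 *	unsigned long flags;
 *	u64 addr = 0xd0000000ULL, size = 0x10000;
 *
 *	if (reserve_memtype(addr, addr + size, _PAGE_CACHE_UC_MINUS, &flags))
 *		return;		// range conflicts with an existing type
 *	// ... map addr with the cache attribute returned in 'flags' ...
 *	free_memtype(addr, addr + size);
 */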

int free_memtype(u64 start, u64 end)
{
	struct memtype *ml;
	int err = -EINVAL;

	/* Only track when pat_enabled */
	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (is_ISA_range(start, end - 1))
		return 0;

	spin_lock(&memtype_lock);
	list_for_each_entry(ml, &memtype_list, nd) {
		if (ml->start == start && ml->end == end) {
			list_del(&ml->nd);
			kfree(ml);
			err = 0;
			break;
		}
	}
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
		       current->comm, current->pid, start, end);
	}

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
	return err;
}


/*
 * /dev/mem mmap interface. The memtype used for mapping varies:
 * - Use UC for mappings with the O_SYNC flag.
 * - Without the O_SYNC flag, if there is any conflict in reserve_memtype,
 *   inherit the memtype from the existing mapping.
 * - Else use the UC_MINUS memtype (for backward compatibility with
 *   existing X drivers).
 */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_NONPROMISC_DEVMEM
/* When CONFIG_NONPROMISC_DEVMEM is set, this check is done in drivers/char/mem.c */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
			       "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
			       current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_NONPROMISC_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot)
{
	u64 offset = ((u64) pfn) << PAGE_SHIFT;
	unsigned long flags = _PAGE_CACHE_UC_MINUS;
	int retval;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC;
	}

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	/*
	 * With O_SYNC, we can only take a UC mapping. Fail if we cannot.
	 * Without O_SYNC, we want to get
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	if (flags != _PAGE_CACHE_UC_MINUS) {
		retval = reserve_memtype(offset, offset + size, flags, NULL);
	} else {
		retval = reserve_memtype(offset, offset + size, -1, &flags);
	}

	if (retval < 0)
		return 0;

	if (pfn <= max_pfn_mapped &&
	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
		free_memtype(offset, offset + size);
		printk(KERN_INFO
		       "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
		       current->comm, current->pid,
		       cattr_name(flags),
		       offset, (unsigned long long)(offset + size));
		return 0;
	}

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}
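
/*
 * Sketch of the corresponding user-space request (illustrative only):
 * opening /dev/mem with O_SYNC makes the check above demand a UC mapping:
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, phys_addr);
 */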

void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;
	unsigned long flags;
	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);

	reserve_memtype(addr, addr + size, want_flags, &flags);
	if (flags != want_flags) {
		printk(KERN_INFO
		       "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
		       current->comm, current->pid,
		       cattr_name(want_flags),
		       addr, (unsigned long long)(addr + size),
		       cattr_name(flags));
	}
}

void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;

	free_memtype(addr, addr + size);
}