/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pat.h>
#include <asm/e820.h>
#include <asm/cacheflush.h>
#include <asm/fcntl.h>
#include <asm/mtrr.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

void __cpuinit pat_disable(char *reason)
{
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
        pat_disable("PAT support disabled.");
        return 0;
}
early_param("nopat", nopat);
#endif
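
/*
 * Illustrative note (not in the original source): PAT can be turned off
 * from the kernel command line via the early parameter registered above,
 * e.g.
 *
 *	linux ... nopat
 *
 * pat_disable() then logs "PAT support disabled." and pat_init() returns
 * early on every CPU.
 */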

static int debug_enable;

static int __init pat_debug_setup(char *str)
{
        debug_enable = 1;
        return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
        do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
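
/*
 * Illustrative note (not in the original source): booting with "debugpat"
 * makes the dprintk() calls below emit KERN_INFO messages such as
 * (hypothetical addresses):
 *
 *	reserve_memtype added 0xd0000000-0xd1000000, track write-combining,
 *	req write-combining, ret write-combining
 */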

static u64 __read_mostly boot_pat_state;

enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
};

#define PAT(x, y)       ((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
        u64 pat;

        if (!pat_enabled)
                return;

        /* Paranoia check. */
        if (!cpu_has_pat && boot_pat_state) {
                /*
                 * If this happens we are on a secondary CPU, but the
                 * boot CPU has already switched to PAT. We have no way
                 * to undo PAT.
                 */
                printk(KERN_ERR "PAT enabled, "
                       "but not supported by secondary CPU\n");
                BUG();
        }

        /* Set PWT to Write-Combining. All other bits stay the same. */
        /*
         * PTE encoding used in Linux:
         *      PAT
         *      |PCD
         *      ||PWT
         *      |||
         *      000 WB          _PAGE_CACHE_WB
         *      001 WC          _PAGE_CACHE_WC
         *      010 UC-         _PAGE_CACHE_UC_MINUS
         *      011 UC          _PAGE_CACHE_UC
         * PAT bit unused
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

        /* Boot CPU check */
        if (!boot_pat_state)
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

        wrmsrl(MSR_IA32_CR_PAT, pat);
        printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
               smp_processor_id(), boot_pat_state, pat);
}

#undef PAT
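
/*
 * Worked example (not in the original source): PAT(x, y) places memory
 * type PAT_y in byte x of the IA32_PAT MSR, so the value programmed in
 * pat_init() expands to:
 *
 *	PAT(0, WB)       = 0x0000000000000006
 *	PAT(1, WC)       = 0x0000000000000100
 *	PAT(2, UC_MINUS) = 0x0000000000070000
 *	PAT(3, UC)       = 0x0000000000000000
 *	(same pattern again for entries 4-7)
 *
 *	pat = 0x0007010600070106
 *
 * i.e. both halves of the MSR hold WB, WC, UC-, UC, matching the PTE
 * encoding table above.
 */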

static char *cattr_name(unsigned long flags)
{
        switch (flags & _PAGE_CACHE_MASK) {
        case _PAGE_CACHE_UC:            return "uncached";
        case _PAGE_CACHE_UC_MINUS:      return "uncached-minus";
        case _PAGE_CACHE_WB:            return "write-back";
        case _PAGE_CACHE_WC:            return "write-combining";
        default:                        return "broken";
        }
}

/*
 * The global memtype list keeps track of the memory type for specific
 * physical memory areas. Conflicting memory types in different mappings
 * can cause CPU cache corruption, so we keep track of them here.
 *
 * The list is sorted by starting address and can contain multiple
 * entries for each address (this allows reference counting for
 * overlapping areas). All the aliases have the same cache attributes,
 * of course. Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should become a problem,
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */

struct memtype {
        u64                     start;
        u64                     end;
        unsigned long           type;
        struct list_head        nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype list */
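
/*
 * Illustration of the aliasing rule above (a sketch, not from the
 * original source; assumes PAT is enabled and hypothetical addresses
 * outside the low ISA region): overlapping reservations with the same
 * type coexist on the list and are freed independently, while a
 * conflicting type is rejected.
 *
 *	reserve_memtype(0xd0000000, 0xd0002000, _PAGE_CACHE_WC, NULL); // 0
 *	reserve_memtype(0xd0001000, 0xd0003000, _PAGE_CACHE_WC, NULL); // 0, alias
 *	reserve_memtype(0xd0001000, 0xd0003000, _PAGE_CACHE_UC_MINUS, NULL);
 *							       // -EBUSY
 */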

/*
 * Intersect the PAT memory type with the MTRR memory type and return the
 * result as PAT understands it. (The type values used by PAT and MTRR
 * are not the same.) The intersection is based on the "Effective Memory
 * Type" tables in IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
        /*
         * Look for an MTRR hint to get the effective type in case the
         * PAT request is for WB.
         */
        if (req_type == _PAGE_CACHE_WB) {
                u8 mtrr_type;

                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type == MTRR_TYPE_UNCACHABLE)
                        return _PAGE_CACHE_UC;
                if (mtrr_type == MTRR_TYPE_WRCOMB)
                        return _PAGE_CACHE_WC;
        }

        return req_type;
}
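
/*
 * Example (not in the original source): a _PAGE_CACHE_WB request over a
 * range that an MTRR marks MTRR_TYPE_UNCACHABLE comes back as
 * _PAGE_CACHE_UC, and over MTRR_TYPE_WRCOMB as _PAGE_CACHE_WC; any
 * request other than WB is returned unchanged.
 */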

static int chk_conflict(struct memtype *new, struct memtype *entry,
                        unsigned long *type)
{
        if (new->type != entry->type) {
                if (type) {
                        new->type = entry->type;
                        *type = entry->type;
                } else
                        goto conflict;
        }

        /* check overlaps with more than one entry in the list */
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (new->end <= entry->start)
                        break;
                else if (new->type != entry->type)
                        goto conflict;
        }
        return 0;

 conflict:
        printk(KERN_INFO "%s:%d conflicting memory types "
               "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
               new->end, cattr_name(new->type), cattr_name(entry->type));
        return -EBUSY;
}

/*
 * Cache of the most recently used list position, so that consecutive
 * reservations at increasing addresses do not rescan the list from the
 * head each time.
 */
static struct memtype *cached_entry;
static u64 cached_start;

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type can also have the special value -1, when the requester wants
 * to inherit the memory type from the MTRR (if WB) or from an existing
 * PAT entry, defaulting to UC_MINUS.
 *
 * If new_type is NULL, the function returns an error if it cannot
 * reserve the region with req_type. If new_type is non-NULL, the
 * function returns the available type in new_type on success. On any
 * error it returns a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *new_type)
{
        struct memtype *new, *entry;
        unsigned long actual_type;
        struct list_head *where;
        int err = 0;

        BUG_ON(start >= end); /* end is exclusive */

        if (!pat_enabled) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
                        if (req_type == -1)
                                *new_type = _PAGE_CACHE_WB;
                        else
                                *new_type = req_type & _PAGE_CACHE_MASK;
                }
                return 0;
        }

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (is_ISA_range(start, end - 1)) {
                if (new_type)
                        *new_type = _PAGE_CACHE_WB;
                return 0;
        }

        if (req_type == -1) {
                /*
                 * Call mtrr_type_lookup to get the type hint. This is an
                 * optimization for /dev/mem mmap'ers into WB memory (BIOS
                 * tools and ACPI tools). Use a WB request for WB memory
                 * and UC_MINUS otherwise.
                 */
                u8 mtrr_type = mtrr_type_lookup(start, end);

                if (mtrr_type == MTRR_TYPE_WRBACK)
                        actual_type = _PAGE_CACHE_WB;
                else
                        actual_type = _PAGE_CACHE_UC_MINUS;
        } else
                actual_type = pat_x_mtrr_type(start, end,
                                              req_type & _PAGE_CACHE_MASK);

        new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start = start;
        new->end = end;
        new->type = actual_type;

        if (new_type)
                *new_type = actual_type;

        spin_lock(&memtype_lock);

        if (cached_entry && start >= cached_start)
                entry = cached_entry;
        else
                entry = list_entry(&memtype_list, struct memtype, nd);

        /* Search for an existing mapping that overlaps the current range */
        where = NULL;
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (end <= entry->start) {
                        where = entry->nd.prev;
                        cached_entry = list_entry(where, struct memtype, nd);
                        break;
                } else if (start <= entry->start) { /* end > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                where = entry->nd.prev;
                                cached_entry = list_entry(where,
                                                        struct memtype, nd);
                        }
                        break;
                } else if (start < entry->end) { /* start > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                cached_entry = list_entry(entry->nd.prev,
                                                        struct memtype, nd);

                                /*
                                 * Move to the right position in the linked
                                 * list to add this new entry
                                 */
                                list_for_each_entry_continue(entry,
                                                        &memtype_list, nd) {
                                        if (start <= entry->start) {
                                                where = entry->nd.prev;
                                                break;
                                        }
                                }
                        }
                        break;
                }
        }

        if (err) {
                printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
                       "track %s, req %s\n",
                       start, end, cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);
                return err;
        }

        cached_start = start;

        if (where)
                list_add(&new->nd, where);
        else
                list_add_tail(&new->nd, &memtype_list);

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
                start, end, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");

        return err;
}

int free_memtype(u64 start, u64 end)
{
        struct memtype *entry;
        int err = -EINVAL;

        if (!pat_enabled)
                return 0;

        /* Low ISA region is always mapped WB. No need to track */
        if (is_ISA_range(start, end - 1))
                return 0;

        spin_lock(&memtype_lock);
        list_for_each_entry(entry, &memtype_list, nd) {
                if (entry->start == start && entry->end == end) {
                        if (cached_entry == entry || cached_start == start)
                                cached_entry = NULL;

                        list_del(&entry->nd);
                        kfree(entry);
                        err = 0;
                        break;
                }
        }
        spin_unlock(&memtype_lock);

        if (err) {
                printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
                       current->comm, current->pid, start, end);
        }

        dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
        return err;
}
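
/*
 * Typical caller pattern (a sketch with hypothetical addresses, not from
 * the original source; this mirrors how the ioremap and /dev/mem paths
 * use the API): reserve before mapping, free after unmapping.
 *
 *	unsigned long flags;
 *
 *	if (reserve_memtype(addr, addr + size, _PAGE_CACHE_WC, &flags))
 *		return -EBUSY;
 *	if (flags != _PAGE_CACHE_WC)
 *		; // caller may accept the weaker type, or free and bail out
 *	...
 *	free_memtype(addr, addr + size);
 */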

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                               "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                               current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                 unsigned long size, pgprot_t *vma_prot)
{
        u64 offset = ((u64) pfn) << PAGE_SHIFT;
        unsigned long flags = -1;
        int retval;

        if (!range_is_allowed(pfn, size))
                return 0;

        if (file->f_flags & O_SYNC) {
                flags = _PAGE_CACHE_UC_MINUS;
        }

#ifdef CONFIG_X86_32
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_enabled &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        }
#endif

        /*
         * With O_SYNC, we can only take a UC_MINUS mapping. Fail if we
         * cannot.
         *
         * Without O_SYNC, we want to get
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        if (flags != -1) {
                retval = reserve_memtype(offset, offset + size, flags, NULL);
        } else {
                retval = reserve_memtype(offset, offset + size, -1, &flags);
        }

        if (retval < 0)
                return 0;

        if (((pfn < max_low_pfn_mapped) ||
             (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
            ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
                free_memtype(offset, offset + size);
                printk(KERN_INFO
                       "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
                       current->comm, current->pid,
                       cattr_name(flags),
                       offset, (unsigned long long)(offset + size));
                return 0;
        }

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             flags);
        return 1;
}
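
/*
 * Illustration (hypothetical user-space snippet, not from the original
 * source): opening /dev/mem with O_SYNC takes the UC_MINUS path above.
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, phys_addr);
 *
 * Without O_SYNC the effective type is inherited from the MTRR/PAT state
 * as described in the comment block inside the function.
 */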

void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        u64 addr = (u64)pfn << PAGE_SHIFT;
        unsigned long flags;
        unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);

        reserve_memtype(addr, addr + size, want_flags, &flags);
        if (flags != want_flags) {
                printk(KERN_INFO
                       "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
                       current->comm, current->pid,
                       cattr_name(want_flags),
                       addr, (unsigned long long)(addr + size),
                       cattr_name(flags));
        }
}

void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        u64 addr = (u64)pfn << PAGE_SHIFT;

        free_memtype(addr, addr + size);
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
        struct memtype *list_node, *print_entry;
        int i = 1;

        print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!print_entry)
                return NULL;

        spin_lock(&memtype_lock);
        list_for_each_entry(list_node, &memtype_list, nd) {
                if (pos == i) {
                        *print_entry = *list_node;
                        spin_unlock(&memtype_lock);
                        return print_entry;
                }
                ++i;
        }
        spin_unlock(&memtype_lock);
        kfree(print_entry);
        return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) {
                ++*pos;
                seq_printf(seq, "PAT memtype list:\n");
        }

        return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
        struct memtype *print_entry = (struct memtype *)v;

        seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
                   print_entry->start, print_entry->end);
        kfree(print_entry);
        return 0;
}

static struct seq_operations memtype_seq_ops = {
        .start = memtype_seq_start,
        .next  = memtype_seq_next,
        .stop  = memtype_seq_stop,
        .show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
        .open    = memtype_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
        debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
                            NULL, &memtype_fops);
        return 0;
}

late_initcall(pat_memtype_list_init);
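
/*
 * Illustration (not in the original source): once registered, the active
 * reservations can be inspected from user space; arch_debugfs_dir
 * normally corresponds to /sys/kernel/debug/x86. Sample output, entries
 * will differ per machine:
 *
 *	# cat /sys/kernel/debug/x86/pat_memtype_list
 *	PAT memtype list:
 *	uncached-minus @ 0xfed00000-0xfed01000
 *	write-combining @ 0xd0000000-0xd1000000
 */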

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */