// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2018 Intel Corporation.
 *
 * Authors: Gayatri Kammela <gayatri.kammela@intel.com>
 *	    Sohil Mehta <sohil.mehta@intel.com>
 *	    Jacob Pan <jacob.jun.pan@linux.intel.com>
 *	    Lu Baolu <baolu.lu@linux.intel.com>
 */

#include <linux/debugfs.h>
#include <linux/dmar.h>
#include <linux/intel-iommu.h>
#include <linux/pci.h>

#include <asm/irq_remapping.h>

#include "pasid.h"
#include "perf.h"

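/*
 * Cursor for a root/context/PASID table walk: identifies the device
 * (bus/devfn, plus the PASID in scalable mode) whose entries are being
 * printed, along with pointers to those entries.
 */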
struct tbl_walk {
	u16 bus;
	u16 devfn;
	u32 pasid;
	struct root_entry *rt_entry;
	struct context_entry *ctx_entry;
	struct pasid_entry *pasid_tbl_entry;
};

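/* An MMIO register offset paired with its printable name, for dumping. */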
struct iommu_regset {
	int offset;
	const char *regs;
};

#define DEBUG_BUFFER_SIZE	1024
static char debug_buf[DEBUG_BUFFER_SIZE];

#define IOMMU_REGSET_ENTRY(_reg_)					\
	{ DMAR_##_reg_##_REG, __stringify(_reg_) }

static const struct iommu_regset iommu_regs_32[] = {
	IOMMU_REGSET_ENTRY(VER),
	IOMMU_REGSET_ENTRY(GCMD),
	IOMMU_REGSET_ENTRY(GSTS),
	IOMMU_REGSET_ENTRY(FSTS),
	IOMMU_REGSET_ENTRY(FECTL),
	IOMMU_REGSET_ENTRY(FEDATA),
	IOMMU_REGSET_ENTRY(FEADDR),
	IOMMU_REGSET_ENTRY(FEUADDR),
	IOMMU_REGSET_ENTRY(PMEN),
	IOMMU_REGSET_ENTRY(PLMBASE),
	IOMMU_REGSET_ENTRY(PLMLIMIT),
	IOMMU_REGSET_ENTRY(ICS),
	IOMMU_REGSET_ENTRY(PRS),
	IOMMU_REGSET_ENTRY(PECTL),
	IOMMU_REGSET_ENTRY(PEDATA),
	IOMMU_REGSET_ENTRY(PEADDR),
	IOMMU_REGSET_ENTRY(PEUADDR),
};

static const struct iommu_regset iommu_regs_64[] = {
	IOMMU_REGSET_ENTRY(CAP),
	IOMMU_REGSET_ENTRY(ECAP),
	IOMMU_REGSET_ENTRY(RTADDR),
	IOMMU_REGSET_ENTRY(CCMD),
	IOMMU_REGSET_ENTRY(AFLOG),
	IOMMU_REGSET_ENTRY(PHMBASE),
	IOMMU_REGSET_ENTRY(PHMLIMIT),
	IOMMU_REGSET_ENTRY(IQH),
	IOMMU_REGSET_ENTRY(IQT),
	IOMMU_REGSET_ENTRY(IQA),
	IOMMU_REGSET_ENTRY(IRTA),
	IOMMU_REGSET_ENTRY(PQH),
	IOMMU_REGSET_ENTRY(PQT),
	IOMMU_REGSET_ENTRY(PQA),
	IOMMU_REGSET_ENTRY(MTRRCAP),
	IOMMU_REGSET_ENTRY(MTRRDEF),
	IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
	IOMMU_REGSET_ENTRY(MTRR_FIX16K_80000),
	IOMMU_REGSET_ENTRY(MTRR_FIX16K_A0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_C0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_C8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_D0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_D8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_E0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_E8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_F0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_F8000),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE0),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK0),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE1),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK1),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE2),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK2),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE3),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK3),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE4),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK4),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE5),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK5),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE6),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK6),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE7),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK7),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE8),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK8),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE9),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK9),
	IOMMU_REGSET_ENTRY(VCCAP),
	IOMMU_REGSET_ENTRY(VCMD),
	IOMMU_REGSET_ENTRY(VCRSP),
};

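/*
 * Dump the raw contents of the 32-bit and 64-bit DMAR registers of each
 * active IOMMU, one table per remapping unit.
 */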
static int iommu_regset_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long flag;
	int i, ret = 0;
	u64 value;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!drhd->reg_base_addr) {
			seq_puts(m, "IOMMU: Invalid base address\n");
			ret = -EINVAL;
			goto out;
		}

		seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
			   iommu->name, drhd->reg_base_addr);
		seq_puts(m, "Name\t\t\tOffset\t\tContents\n");
		/*
		 * Publish the contents of the hardware registers by
		 * adding the register offset to the MMIO base pointer
		 * (virtual address).
		 */
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
		for (i = 0; i < ARRAY_SIZE(iommu_regs_32); i++) {
			value = dmar_readl(iommu->reg + iommu_regs_32[i].offset);
			seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
				   iommu_regs_32[i].regs, iommu_regs_32[i].offset,
				   value);
		}
		for (i = 0; i < ARRAY_SIZE(iommu_regs_64); i++) {
			value = dmar_readq(iommu->reg + iommu_regs_64[i].offset);
			seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
				   iommu_regs_64[i].regs, iommu_regs_64[i].offset,
				   value);
		}
		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
		seq_putc(m, '\n');
	}
out:
	rcu_read_unlock();

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(iommu_regset);

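/*
 * Emit one line of the translation-structure dump: B.D.F, the root and
 * context entries, and (in scalable mode) the PASID and PASID table
 * entry recorded in m->private.
 */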
static inline void print_tbl_walk(struct seq_file *m)
{
	struct tbl_walk *tbl_wlk = m->private;

	seq_printf(m, "%02x:%02x.%x\t0x%016llx:0x%016llx\t0x%016llx:0x%016llx\t",
		   tbl_wlk->bus, PCI_SLOT(tbl_wlk->devfn),
		   PCI_FUNC(tbl_wlk->devfn), tbl_wlk->rt_entry->hi,
		   tbl_wlk->rt_entry->lo, tbl_wlk->ctx_entry->hi,
		   tbl_wlk->ctx_entry->lo);

	/*
	 * A legacy mode DMAR doesn't support PASID, so default it to -1 to
	 * indicate that it's invalid. Also default all PASID-related fields
	 * to 0.
	 */
	if (!tbl_wlk->pasid_tbl_entry)
		seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", -1,
			   (u64)0, (u64)0, (u64)0);
	else
		seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n",
			   tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[2],
			   tbl_wlk->pasid_tbl_entry->val[1],
			   tbl_wlk->pasid_tbl_entry->val[0]);
}

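/* Print all present entries of one PASID table, tagging each with its full PASID. */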
static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry,
			   u16 dir_idx)
{
	struct tbl_walk *tbl_wlk = m->private;
	u8 tbl_idx;

	for (tbl_idx = 0; tbl_idx < PASID_TBL_ENTRIES; tbl_idx++) {
		if (pasid_pte_is_present(tbl_entry)) {
			tbl_wlk->pasid_tbl_entry = tbl_entry;
			tbl_wlk->pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx;
			print_tbl_walk(m);
		}

		tbl_entry++;
	}
}

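/* Walk a PASID directory and descend into each present PASID table. */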
static void pasid_dir_walk(struct seq_file *m, u64 pasid_dir_ptr,
			   u16 pasid_dir_size)
{
	struct pasid_dir_entry *dir_entry = phys_to_virt(pasid_dir_ptr);
	struct pasid_entry *pasid_tbl;
	u16 dir_idx;

	for (dir_idx = 0; dir_idx < pasid_dir_size; dir_idx++) {
		pasid_tbl = get_pasid_table_from_pde(dir_entry);
		if (pasid_tbl)
			pasid_tbl_walk(m, pasid_tbl, dir_idx);

		dir_entry++;
	}
}

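/*
 * Walk the context table of one bus: print every present context entry
 * and, in scalable mode, the PASID structures hanging off it.
 */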
static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
{
	struct context_entry *context;
	u16 devfn, pasid_dir_size;
	u64 pasid_dir_ptr;

	for (devfn = 0; devfn < 256; devfn++) {
		struct tbl_walk tbl_wlk = {0};

		/*
		 * A scalable mode root entry points to an upper scalable
		 * mode context table and a lower scalable mode context
		 * table. Each scalable mode context table has 128 context
		 * entries, whereas a legacy mode context table has 256. So
		 * in scalable mode, the context entries for the first 128
		 * devices are in the lower context table and those for the
		 * remaining 128 devices are in the upper one. When
		 * devfn > 127, iommu_context_addr() automatically refers
		 * to the upper scalable mode context table, so the caller
		 * doesn't have to worry about the difference between
		 * scalable and non-scalable mode.
		 */
		context = iommu_context_addr(iommu, bus, devfn, 0);
		if (!context)
			return;

		if (!context_present(context))
			continue;

		tbl_wlk.bus = bus;
		tbl_wlk.devfn = devfn;
		tbl_wlk.rt_entry = &iommu->root_entry[bus];
		tbl_wlk.ctx_entry = context;
		m->private = &tbl_wlk;

		if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
			pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
			pasid_dir_size = get_pasid_dir_size(context);
			pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
			continue;
		}

		print_tbl_walk(m);
	}
}

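/* Dump the whole root table of one IOMMU, bus by bus, under iommu->lock. */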
static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
{
	unsigned long flags;
	u16 bus;

	spin_lock_irqsave(&iommu->lock, flags);
	seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
		   (u64)virt_to_phys(iommu->root_entry));
	seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");

	/*
	 * No need to check if the root entry is present or not because
	 * iommu_context_addr() performs the same check before returning
	 * a context entry.
	 */
	for (bus = 0; bus < 256; bus++)
		ctx_tbl_walk(m, iommu, bus);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

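/*
 * Show the translation structures of every active IOMMU on which DMA
 * remapping is currently enabled (TES set in the global status register).
 */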
static int dmar_translation_struct_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	u32 sts;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
		if (!(sts & DMA_GSTS_TES)) {
			seq_printf(m, "DMA Remapping is not enabled on %s\n",
				   iommu->name);
			continue;
		}
		root_tbl_walk(m, iommu);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct);

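/*
 * Number of IOVA bytes covered by a single entry at @level: 4KiB pages
 * with a 9-bit stride per level, so 4KiB at level 1, 2MiB at level 2,
 * 1GiB at level 3, and so on.
 */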
static inline unsigned long level_to_directory_size(int level)
{
	return BIT_ULL(VTD_PAGE_SHIFT + VTD_STRIDE_SHIFT * (level - 1));
}

static inline void
dump_page_info(struct seq_file *m, unsigned long iova, u64 *path)
{
	seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\n",
		   iova >> VTD_PAGE_SHIFT, path[5], path[4],
		   path[3], path[2], path[1]);
}

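/*
 * Recursively walk one level of a second-stage page table, printing a
 * line for every mapped page (leaf PTE or superpage). @path records the
 * entry value at each level on the way down.
 */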
static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
			       int level, unsigned long start,
			       u64 *path)
{
	int i;

	if (level > 5 || level < 1)
		return;

	for (i = 0; i < BIT_ULL(VTD_STRIDE_SHIFT);
	     i++, pde++, start += level_to_directory_size(level)) {
		if (!dma_pte_present(pde))
			continue;

		path[level] = pde->val;
		if (dma_pte_superpage(pde) || level == 1)
			dump_page_info(m, start, path);
		else
			pgtable_walk_level(m, phys_to_virt(dma_pte_addr(pde)),
					   level - 1, start, path);
		path[level] = 0;
	}
}

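/*
 * Dump the page table of the domain attached to @dev, if any. The walk
 * starts at domain->pgd; domain->agaw + 2 is the page-table level count
 * (matching agaw_to_level()).
 */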
static int show_device_domain_translation(struct device *dev, void *data)
{
	struct dmar_domain *domain = find_domain(dev);
	struct seq_file *m = data;
	u64 path[6] = { 0 };

	if (!domain)
		return 0;

	seq_printf(m, "Device %s with pasid %d @0x%llx\n",
		   dev_name(dev), domain->default_pasid,
		   (u64)virt_to_phys(domain->pgd));
	seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");

	pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
	seq_putc(m, '\n');

	return 0;
}

static int domain_translation_struct_show(struct seq_file *m, void *unused)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&device_domain_lock, flags);
	ret = bus_for_each_dev(&pci_bus_type, NULL, m,
			       show_device_domain_translation);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);

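/*
 * Print every descriptor in the invalidation queue together with its
 * software status. Scalable mode uses 256-bit (4 qword) descriptors,
 * legacy mode 128-bit (2 qword) ones.
 */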
static void invalidation_queue_entry_show(struct seq_file *m,
					  struct intel_iommu *iommu)
{
	int index, shift = qi_shift(iommu);
	struct qi_desc *desc;
	int offset;

	if (ecap_smts(iommu->ecap))
		seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tqw2\t\t\tqw3\t\t\tstatus\n");
	else
		seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tstatus\n");

	for (index = 0; index < QI_LENGTH; index++) {
		offset = index << shift;
		desc = iommu->qi->desc + offset;
		if (ecap_smts(iommu->ecap))
			seq_printf(m, "%5d\t%016llx\t%016llx\t%016llx\t%016llx\t%016x\n",
				   index, desc->qw0, desc->qw1,
				   desc->qw2, desc->qw3,
				   iommu->qi->desc_status[index]);
		else
			seq_printf(m, "%5d\t%016llx\t%016llx\t%016x\n",
				   index, desc->qw0, desc->qw1,
				   iommu->qi->desc_status[index]);
	}
}

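/*
 * For each active IOMMU with queued invalidation enabled, print the queue
 * base plus the current head and tail, then the descriptors themselves.
 */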
static int invalidation_queue_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long flags;
	struct q_inval *qi;
	int shift;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		qi = iommu->qi;
		shift = qi_shift(iommu);

		if (!qi || !ecap_qis(iommu->ecap))
			continue;

		seq_printf(m, "Invalidation queue on IOMMU: %s\n", iommu->name);

		raw_spin_lock_irqsave(&qi->q_lock, flags);
		seq_printf(m, " Base: 0x%llx\tHead: %lld\tTail: %lld\n",
			   (u64)virt_to_phys(qi->desc),
			   dmar_readq(iommu->reg + DMAR_IQH_REG) >> shift,
			   dmar_readq(iommu->reg + DMAR_IQT_REG) >> shift);
		invalidation_queue_entry_show(m, iommu);
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(invalidation_queue);

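/* Print the remapped-format (non-posted) entries of an IR table. */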
#ifdef CONFIG_IRQ_REMAP
static void ir_tbl_remap_entry_show(struct seq_file *m,
				    struct intel_iommu *iommu)
{
	struct irte *ri_entry;
	unsigned long flags;
	int idx;

	seq_puts(m, " Entry SrcID DstID Vct IRTE_high\t\tIRTE_low\n");

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
		ri_entry = &iommu->ir_table->base[idx];
		if (!ri_entry->present || ri_entry->p_pst)
			continue;

		seq_printf(m, " %-5d %02x:%02x.%01x %08x %02x %016llx\t%016llx\n",
			   idx, PCI_BUS_NUM(ri_entry->sid),
			   PCI_SLOT(ri_entry->sid), PCI_FUNC(ri_entry->sid),
			   ri_entry->dest_id, ri_entry->vector,
			   ri_entry->high, ri_entry->low);
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
}

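/*
 * Print the posted-format entries of an IR table. PDA is the address of
 * the posted-interrupt descriptor; pda_l is shifted left by 6 because the
 * descriptor is 64-byte aligned.
 */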
static void ir_tbl_posted_entry_show(struct seq_file *m,
				     struct intel_iommu *iommu)
{
	struct irte *pi_entry;
	unsigned long flags;
	int idx;

	seq_puts(m, " Entry SrcID PDA_high PDA_low Vct IRTE_high\t\tIRTE_low\n");

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
		pi_entry = &iommu->ir_table->base[idx];
		if (!pi_entry->present || !pi_entry->p_pst)
			continue;

		seq_printf(m, " %-5d %02x:%02x.%01x %08x %08x %02x %016llx\t%016llx\n",
			   idx, PCI_BUS_NUM(pi_entry->sid),
			   PCI_SLOT(pi_entry->sid), PCI_FUNC(pi_entry->sid),
			   pi_entry->pda_h, pi_entry->pda_l << 6,
			   pi_entry->vector, pi_entry->high,
			   pi_entry->low);
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
}

/*
 * For active IOMMUs, go through the interrupt remapping table and print
 * the valid entries in a table format for both remapped and posted
 * interrupts.
 */
static int ir_translation_struct_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	u64 irta;
	u32 sts;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
			   iommu->name);

		sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
		if (iommu->ir_table && (sts & DMA_GSTS_IRES)) {
			irta = virt_to_phys(iommu->ir_table->base);
			seq_printf(m, " IR table address:%llx\n", irta);
			ir_tbl_remap_entry_show(m, iommu);
		} else {
			seq_puts(m, "Interrupt Remapping is not enabled\n");
		}
		seq_putc(m, '\n');
	}

	seq_puts(m, "****\n\n");

	for_each_active_iommu(iommu, drhd) {
		if (!cap_pi_support(iommu->cap))
			continue;

		seq_printf(m, "Posted Interrupt supported on IOMMU: %s\n",
			   iommu->name);

		if (iommu->ir_table) {
			irta = virt_to_phys(iommu->ir_table->base);
			seq_printf(m, " IR table address:%llx\n", irta);
			ir_tbl_posted_entry_show(m, iommu);
		} else {
			seq_puts(m, "Interrupt Remapping is not enabled\n");
		}
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ir_translation_struct);
#endif

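/* Format one IOMMU's latency snapshot into the shared debug_buf and print it. */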
static void latency_show_one(struct seq_file *m, struct intel_iommu *iommu,
			     struct dmar_drhd_unit *drhd)
{
	int ret;

	seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
		   iommu->name, drhd->reg_base_addr);

	ret = dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
	if (ret < 0)
		seq_puts(m, "Failed to get latency snapshot");
	else
		seq_puts(m, debug_buf);
	seq_puts(m, "\n");
}

static int latency_show(struct seq_file *m, void *v)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd)
		latency_show_one(m, iommu, drhd);
	rcu_read_unlock();

	return 0;
}

static int dmar_perf_latency_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, latency_show, NULL);
}

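/*
 * Control latency sampling via a single decimal digit written to the
 * file: 0 disables all counters; 1, 2, 3 and 4 enable IOTLB
 * invalidation, device-TLB invalidation, IEC invalidation and page
 * request latency sampling respectively, on all active IOMMUs.
 */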
static ssize_t dmar_perf_latency_write(struct file *filp,
				       const char __user *ubuf,
				       size_t cnt, loff_t *ppos)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int counting;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (kstrtoint(buf, 0, &counting))
		return -EINVAL;

	switch (counting) {
	case 0:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd) {
			dmar_latency_disable(iommu, DMAR_LATENCY_INV_IOTLB);
			dmar_latency_disable(iommu, DMAR_LATENCY_INV_DEVTLB);
			dmar_latency_disable(iommu, DMAR_LATENCY_INV_IEC);
			dmar_latency_disable(iommu, DMAR_LATENCY_PRQ);
		}
		rcu_read_unlock();
		break;
	case 1:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_INV_IOTLB);
		rcu_read_unlock();
		break;
	case 2:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_INV_DEVTLB);
		rcu_read_unlock();
		break;
	case 3:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_INV_IEC);
		rcu_read_unlock();
		break;
	case 4:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_PRQ);
		rcu_read_unlock();
		break;
	default:
		return -EINVAL;
	}

	*ppos += cnt;
	return cnt;
}

static const struct file_operations dmar_perf_latency_fops = {
	.open		= dmar_perf_latency_open,
	.write		= dmar_perf_latency_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

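/*
 * Create the "intel" directory under the common iommu debugfs root and
 * populate it with the DMAR debug files.
 */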
void __init intel_iommu_debugfs_init(void)
{
	struct dentry *intel_iommu_debug = debugfs_create_dir("intel",
						iommu_debugfs_dir);

	debugfs_create_file("iommu_regset", 0444, intel_iommu_debug, NULL,
			    &iommu_regset_fops);
	debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
			    NULL, &dmar_translation_struct_fops);
	debugfs_create_file("domain_translation_struct", 0444,
			    intel_iommu_debug, NULL,
			    &domain_translation_struct_fops);
	debugfs_create_file("invalidation_queue", 0444, intel_iommu_debug,
			    NULL, &invalidation_queue_fops);
#ifdef CONFIG_IRQ_REMAP
	debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
			    NULL, &ir_translation_struct_fops);
#endif
	debugfs_create_file("dmar_perf_latency", 0644, intel_iommu_debug,
			    NULL, &dmar_perf_latency_fops);
}