/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>

#undef PREFIX
#define PREFIX "DMAR:"

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
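
/*
 * A device scope entry (struct acpi_dmar_device_scope) names one device
 * under a DRHD/RMRR: a type, a total length, a starting bus number, and
 * then a variable-length array of struct acpi_dmar_pci_path hops, each
 * a (device, function) pair.  Starting from the scope's bus, each hop
 * is resolved to a PCI device and, for bridges, the walk continues on
 * the subordinate bus until the final device is reached.
 */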
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR
		 * table; just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING PREFIX
				"Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) || (scope->entry_type ==
	     ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
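
/*
 * dmar_parse_dev_scope() makes two passes over the scope entries: the
 * first counts the endpoint and bridge entries so the device array can
 * be sized, the second resolves each entry to a struct pci_dev.
 */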
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	if (!drhd->address) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -ENODEV;
	}
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}
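
/*
 * Device scopes are resolved separately from table parsing:
 * parse_dmar_table() may run before the PCI subsystem is up (interrupt
 * remapping is set up early in boot), so dmar_parse_dev() is deferred
 * to dmar_dev_scope_init(), by which point PCI devices can be looked
 * up.
 */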
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = (struct acpi_dmar_hardware_unit *)header;
		printk(KERN_INFO PREFIX
			"DRHD (flags: 0x%08x) base: 0x%016Lx\n",
			drhd->flags, (unsigned long long)drhd->address);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = (struct acpi_dmar_reserved_memory *)header;

		printk(KERN_INFO PREFIX
			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
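
/*
 * The DMAR table starts with a fixed header (struct acpi_table_dmar:
 * host address width and flags) followed by a sequence of variable
 * length remapping structures.  Each structure begins with a common
 * struct acpi_dmar_header carrying its type (DRHD, RMRR, ...) and
 * length, which is how parse_dmar_table() steps from one entry to the
 * next.
 */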
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again: the earlier dmar_tbl mapping may have used the
	 * early fixmap and been torn down since.
	 */
	dmar_table_detect();

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
		dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
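
/*
 * Explicit device scopes are checked before any INCLUDE_ALL unit:
 * dmar_register_drhd_unit() keeps INCLUDE_ALL units at the tail of
 * dmar_drhd_units, so the list walk below encounters them last.
 */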
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "Parsing DMAR table failed\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");
#endif

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
#endif
	return 0;
}
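
/*
 * detect_intel_iommu() runs very early in boot; the DMAR table is
 * reached through ACPI's early (fixmap-based) mapping, so it is
 * unmapped again at the end and dmar_tbl is cleared.
 * parse_dmar_table() re-runs the detection later to get a regular
 * mapping.
 */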
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();

	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now, DMA remapping is disabled when interrupt
		 * remapping is enabled.  Once queued invalidation
		 * support for IOTLB invalidation is added, this will
		 * not be needed any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto error;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto error;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;
error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
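
/*
 * The invalidation queue is a circular ring of descriptors: free_head
 * is where the next descriptors are placed, free_tail is the oldest
 * slot not yet reclaimed, and each slot's desc_status moves from
 * QI_FREE to QI_IN_USE on submission and to QI_DONE on completion,
 * after which reclaim_free_desc() returns it to the free pool.
 */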
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> 4) == index) {
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	return 0;
}
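
/*
 * Each synchronous submission takes two slots: the caller's descriptor
 * and a wait descriptor with the status-write bit set.  Hardware
 * writes QI_DONE into the status word the wait descriptor points at
 * (qi->desc_status[wait_index]), and qi_submit_sync() spins on that
 * word until the whole pair has completed.
 */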
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc = 0;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			goto out;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}
out:
	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		     u64 type, int non_present_entry_flush)
{
	struct qi_desc desc;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		   unsigned int size_order, u64 type,
		   int non_present_entry_flush)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	return qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;

	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 cmd, sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	cmd = iommu->gcmd | DMA_GCMD_QIE;
	iommu->gcmd |= DMA_GCMD_QIE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;
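
	/*
	 * The queue is a single zeroed page: 4K of 16-byte descriptors
	 * gives the QI_LENGTH (256) slots.  __dmar_enable_qi() hands
	 * its physical address to hardware via DMAR_IQA_REG.
	 */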
	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	/* zeroed so every slot starts out QI_FREE */
	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
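
/*
 * Each primary fault record is 16 bytes: a 64-bit fault address at
 * offset 0, the source id in the dword at offset 8, and the fault
 * reason, request type, and F (fault valid) bit in the dword at offset
 * 12.  Writing DMA_FRCD_F back clears the record.
 */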
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}