iommu/dmar: Reserve mmio space used by the IOMMU, if the BIOS forgets to
drivers/iommu/dmar.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#define PREFIX "DMAR: "

/* No locks are needed as DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units are not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so that a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			pr_warn(PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			pr_warn(PREFIX "Device scope device "
				"[%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn(PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		pr_warn(PREFIX "Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

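/*
 * dmar_parse_dev_scope - walk a variable-length list of device scope
 * entries: first count the PCI endpoint/bridge entries, then allocate
 * a pci_dev pointer array and resolve each entry to a device.
 */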
int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			pr_warn(PREFIX "Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

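/*
 * An RHSA structure associates a DMA remapping hardware unit with the
 * NUMA proximity domain (and hence node) its registers live on.
 */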
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		drhd->reg_base_addr,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info(PREFIX "DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info(PREFIX "RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info(PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info(PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn(PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again, as the earlier dmar_tbl mapping could have used the
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the copy of
	 * the DMAR table that SINIT saved in SinitMleData in the TXT heap
	 * (which is DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn(PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info(PREFIX "Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn(PREFIX "Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn(PREFIX "Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

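/*
 * Return 1 if the device, or any PCI bridge upstream of it, is in the
 * given device array; otherwise return 0.
 */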
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

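/*
 * Find the DRHD unit whose device scope covers @dev, falling back to an
 * INCLUDE_ALL unit on the same PCI segment.
 */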
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}


int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			pr_info(PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		pr_info(PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

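/*
 * Early sanity check of the DRHD entries: a zero register base address or
 * capability registers that read back as all ones indicate broken firmware.
 */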
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn(PREFIX "Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_err("IOMMU: can't validate: %llx\n",
				       drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

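/*
 * Early detection entry point (run via IOMMU_INIT_POST below): map and
 * validate the DMAR table, then hook intel_iommu_init into the x86 IOMMU
 * init path if the platform looks usable.
 */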
int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
		struct acpi_table_dmar *dmar;

		dmar = (struct acpi_table_dmar *) dmar_tbl;

		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
		    dmar->flags & 0x1)
			pr_info("Queued invalidation will be enabled to "
				"support x2apic and Intr-remapping.\n");

		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}

#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start with a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	unmap_iommu(iommu);
 error:
	kfree(iommu);
	return err;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

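/*
 * Check the fault status register for queued-invalidation errors (IQE,
 * ITE, ICE) affecting the descriptor at @index, and recover where possible.
 */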
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

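/*
 * Invalidate a device's IOTLB (ATS) entries for the given address; a
 * non-zero mask selects a power-of-two sized region.
 */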
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of this is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

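/*
 * Primary fault interrupt handler: walk the hardware fault recording
 * registers, log each pending fault record and clear it.
 */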
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
IOMMU_INIT_POST(detect_intel_iommu);