2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <jroedel@suse.de>
4 * Leo Duran <leo.duran@amd.com>
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include <linux/pci.h>
21 #include <linux/acpi.h>
22 #include <linux/list.h>
23 #include <linux/slab.h>
24 #include <linux/syscore_ops.h>
25 #include <linux/interrupt.h>
26 #include <linux/msi.h>
27 #include <linux/amd-iommu.h>
28 #include <linux/export.h>
29 #include <linux/iommu.h>
30 #include <asm/pci-direct.h>
31 #include <asm/iommu.h>
33 #include <asm/x86_init.h>
34 #include <asm/iommu_table.h>
35 #include <asm/io_apic.h>
36 #include <asm/irq_remapping.h>
38 #include "amd_iommu_proto.h"
39 #include "amd_iommu_types.h"
40 #include "irq_remapping.h"
43 * definitions for the ACPI scanning code
45 #define IVRS_HEADER_LENGTH 48
47 #define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
48 #define ACPI_IVMD_TYPE_ALL 0x20
49 #define ACPI_IVMD_TYPE 0x21
50 #define ACPI_IVMD_TYPE_RANGE 0x22
52 #define IVHD_DEV_ALL 0x01
53 #define IVHD_DEV_SELECT 0x02
54 #define IVHD_DEV_SELECT_RANGE_START 0x03
55 #define IVHD_DEV_RANGE_END 0x04
56 #define IVHD_DEV_ALIAS 0x42
57 #define IVHD_DEV_ALIAS_RANGE 0x43
58 #define IVHD_DEV_EXT_SELECT 0x46
59 #define IVHD_DEV_EXT_SELECT_RANGE 0x47
60 #define IVHD_DEV_SPECIAL 0x48
61 #define IVHD_DEV_ACPI_HID 0xf0
63 #define UID_NOT_PRESENT 0
64 #define UID_IS_INTEGER 1
65 #define UID_IS_CHARACTER 2
67 #define IVHD_SPECIAL_IOAPIC 1
68 #define IVHD_SPECIAL_HPET 2
70 #define IVHD_FLAG_HT_TUN_EN_MASK 0x01
71 #define IVHD_FLAG_PASSPW_EN_MASK 0x02
72 #define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
73 #define IVHD_FLAG_ISOC_EN_MASK 0x08
75 #define IVMD_FLAG_EXCL_RANGE 0x08
76 #define IVMD_FLAG_UNITY_MAP 0x01
78 #define ACPI_DEVFLAG_INITPASS 0x01
79 #define ACPI_DEVFLAG_EXTINT 0x02
80 #define ACPI_DEVFLAG_NMI 0x04
81 #define ACPI_DEVFLAG_SYSMGT1 0x10
82 #define ACPI_DEVFLAG_SYSMGT2 0x20
83 #define ACPI_DEVFLAG_LINT0 0x40
84 #define ACPI_DEVFLAG_LINT1 0x80
85 #define ACPI_DEVFLAG_ATSDIS 0x10000000
87 #define LOOP_TIMEOUT 100000
89 * ACPI table definitions
91 * These data structures are laid over the ACPI table to parse the important values
96 * structure describing one IOMMU in the ACPI table. Typically followed by one
97 * or more ivhd_entries.
110 /* Following only valid on IVHD type 11h and 40h */
111 u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
113 } __attribute__((packed));
116 * A device entry describing which devices a specific IOMMU translates and
117 * which requestor ids they use.
129 } __attribute__((packed));
132 * An AMD IOMMU memory definition structure. It defines things like exclusion
133 * ranges for devices and regions that should be unity mapped.
144 } __attribute__((packed));
147 bool amd_iommu_irq_remap __read_mostly;
149 int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
151 static bool amd_iommu_detected;
152 static bool __initdata amd_iommu_disabled;
153 static int amd_iommu_target_ivhd_type;
155 u16 amd_iommu_last_bdf; /* largest PCI device id we have to handle */
157 LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings we find in ACPI */
159 bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
161 LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the system */
164 /* Array to assign indices to IOMMUs */
165 struct amd_iommu *amd_iommus[MAX_IOMMUS];
166 int amd_iommus_present;
168 /* IOMMUs have a non-present cache? */
169 bool amd_iommu_np_cache __read_mostly;
170 bool amd_iommu_iotlb_sup __read_mostly = true;
172 u32 amd_iommu_max_pasid __read_mostly = ~0;
174 bool amd_iommu_v2_present __read_mostly;
175 static bool amd_iommu_pc_present __read_mostly;
177 bool amd_iommu_force_isolation __read_mostly;
180 * List of protection domains - used during resume
182 LIST_HEAD(amd_iommu_pd_list);
183 spinlock_t amd_iommu_pd_lock;
186 * Pointer to the device table which is shared by all AMD IOMMUs.
187 * It is indexed by the PCI device id or the HT unit id and contains
188 * information about the domain the device belongs to as well as the
189 * page table root pointer.
191 struct dev_table_entry *amd_iommu_dev_table;
194 * The alias table is a driver specific data structure which contains the
195 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
196 * More than one device can share the same requestor id.
198 u16 *amd_iommu_alias_table;
201 * The rlookup table is used to find the IOMMU which is responsible
202 * for a specific device. It is also indexed by the PCI device id.
204 struct amd_iommu **amd_iommu_rlookup_table;
207 * This table is used to find the irq remapping table for a given device id
210 struct irq_remap_table **irq_lookup_table;
213 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
214 * to know which ones are already in use.
216 unsigned long *amd_iommu_pd_alloc_bitmap;
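/*
 * A minimal illustrative sketch (not the driver's actual allocator, which
 * serializes its bitmap updates with a lock) of how a protection domain id
 * could be taken from this bitmap; the helper name is hypothetical:
 */
static inline int example_alloc_domain_id(void)
{
	/* Skip domain 0, which is reserved as the error/non-allocated value */
	unsigned long id = find_next_zero_bit(amd_iommu_pd_alloc_bitmap,
					      MAX_DOMAIN_ID, 1);

	if (id >= MAX_DOMAIN_ID)
		return -ENOSPC;
	__set_bit(id, amd_iommu_pd_alloc_bitmap);
	return id;
}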
218 static u32 dev_table_size; /* size of the device table */
219 static u32 alias_table_size; /* size of the alias table */
220 static u32 rlookup_table_size; /* size of the rlookup table */
222 enum iommu_init_state {
235 /* Early ioapic and hpet maps from kernel command line */
236 #define EARLY_MAP_SIZE 4
237 static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
238 static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
239 static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
241 static int __initdata early_ioapic_map_size;
242 static int __initdata early_hpet_map_size;
243 static int __initdata early_acpihid_map_size;
245 static bool __initdata cmdline_maps;
247 static enum iommu_init_state init_state = IOMMU_START_STATE;
249 static int amd_iommu_enable_interrupts(void);
250 static int __init iommu_go_to_state(enum iommu_init_state state);
251 static void init_device_table_dma(void);
253 static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
254 u8 bank, u8 cntr, u8 fxn,
255 u64 *value, bool is_write);
257 static inline void update_last_devid(u16 devid)
259 if (devid > amd_iommu_last_bdf)
260 amd_iommu_last_bdf = devid;
263 static inline unsigned long tbl_size(int entry_size)
265 unsigned shift = PAGE_SHIFT +
266 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
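/*
 * Worked example (assuming 4 KiB pages and a 32-byte DEV_TABLE_ENTRY_SIZE):
 * with amd_iommu_last_bdf == 0xffff the device table needs
 * 0x10000 * 32 == 2 MiB, get_order() yields 9, and tbl_size() comes out
 * to 1UL << (12 + 9) == 2 MiB.
 */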
271 /* Access to l1 and l2 indexed register spaces */
273 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
277 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
278 pci_read_config_dword(iommu->dev, 0xfc, &val);
282 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
284 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
285 pci_write_config_dword(iommu->dev, 0xfc, val);
286 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
289 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
293 pci_write_config_dword(iommu->dev, 0xf0, address);
294 pci_read_config_dword(iommu->dev, 0xf4, &val);
298 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
300 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
301 pci_write_config_dword(iommu->dev, 0xf4, val);
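/*
 * Usage note: these accessors drive the indirect address/data register
 * pairs at config offsets 0xf8/0xfc (L1) and 0xf0/0xf4 (L2); setting
 * bit 31 (L1) or bit 8 (L2) of the address register enables the write
 * path, as the ATS write check workaround below demonstrates.
 */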
304 /****************************************************************************
306 * AMD IOMMU MMIO register space handling functions
308 * These functions are used to program the IOMMU device registers in the
309 * MMIO space that the driver requires.
311 ****************************************************************************/
314 * This function sets the exclusion range in the IOMMU. DMA accesses to the
315 * exclusion range are passed through untranslated.
317 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
319 u64 start = iommu->exclusion_start & PAGE_MASK;
320 u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
323 if (!iommu->exclusion_start)
326 entry = start | MMIO_EXCL_ENABLE_MASK;
327 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
328 &entry, sizeof(entry));
331 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
332 &entry, sizeof(entry));
335 /* Programs the physical address of the device table into the IOMMU hardware */
336 static void iommu_set_device_table(struct amd_iommu *iommu)
340 BUG_ON(iommu->mmio_base == NULL);
342 entry = virt_to_phys(amd_iommu_dev_table);
343 entry |= (dev_table_size >> 12) - 1;
344 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
345 &entry, sizeof(entry));
348 /* Generic functions to enable/disable certain features of the IOMMU. */
349 static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
353 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
355 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
358 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
362 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
364 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
367 static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
371 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
372 ctrl &= ~CTRL_INV_TO_MASK;
373 ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
374 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
377 /* Function to enable the hardware */
378 static void iommu_enable(struct amd_iommu *iommu)
380 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
383 static void iommu_disable(struct amd_iommu *iommu)
385 /* Disable command buffer */
386 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
388 /* Disable event logging and event interrupts */
389 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
390 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
392 /* Disable IOMMU GA_LOG */
393 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
394 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
396 /* Disable IOMMU hardware itself */
397 iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
401 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
402 * the system has one.
404 static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
406 if (!request_mem_region(address, end, "amd_iommu")) {
407 pr_err("AMD-Vi: Cannot reserve memory region %llx-%llx for mmio\n",
409 pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
413 return (u8 __iomem *)ioremap_nocache(address, end);
416 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
418 if (iommu->mmio_base)
419 iounmap(iommu->mmio_base);
420 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
423 static inline u32 get_ivhd_header_size(struct ivhd_header *h)
439 /****************************************************************************
441 * The functions below belong to the first pass of AMD IOMMU ACPI table
442 * parsing. In this pass we try to find out the highest device id this
443 * code has to handle. Based on this information the sizes of the shared
444 * data structures are determined later.
446 ****************************************************************************/
449 * This function calculates the length of a given IVHD entry
451 static inline int ivhd_entry_length(u8 *ivhd)
453 u32 type = ((struct ivhd_entry *)ivhd)->type;
456 return 0x04 << (*ivhd >> 6);
457 } else if (type == IVHD_DEV_ACPI_HID) {
458 /* For ACPI_HID, offset 21 is uid len */
459 return *((u8 *)ivhd + 21) + 22;
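/*
 * Worked examples: IVHD_DEV_SELECT (0x02 >> 6 == 0) is a 4-byte entry,
 * IVHD_DEV_ALIAS (0x42 >> 6 == 1) an 8-byte one, while IVHD_DEV_ACPI_HID
 * entries are variable-length with the UID length stored at offset 21.
 */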
465 * After reading the highest device id from the IOMMU PCI capability header
466 * this function checks whether a higher device id is defined in the ACPI table
468 static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
470 u8 *p = (void *)h, *end = (void *)h;
471 struct ivhd_entry *dev;
473 u32 ivhd_size = get_ivhd_header_size(h);
476 pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
484 dev = (struct ivhd_entry *)p;
487 /* Use maximum BDF value for DEV_ALL */
488 update_last_devid(0xffff);
490 case IVHD_DEV_SELECT:
491 case IVHD_DEV_RANGE_END:
493 case IVHD_DEV_EXT_SELECT:
494 /* all the above subfield types refer to device ids */
495 update_last_devid(dev->devid);
500 p += ivhd_entry_length(p);
508 static int __init check_ivrs_checksum(struct acpi_table_header *table)
511 u8 checksum = 0, *p = (u8 *)table;
513 for (i = 0; i < table->length; ++i)
516 /* ACPI table corrupt */
517 pr_err(FW_BUG "AMD-Vi: IVRS invalid checksum\n");
525 * Iterate over all IVHD entries in the ACPI table and find the highest device
526 * id which we need to handle. This is the first of three functions which parse
527 * the ACPI table; the checksum has already been verified at this point.
529 static int __init find_last_devid_acpi(struct acpi_table_header *table)
531 u8 *p = (u8 *)table, *end = (u8 *)table;
532 struct ivhd_header *h;
534 p += IVRS_HEADER_LENGTH;
536 end += table->length;
538 h = (struct ivhd_header *)p;
539 if (h->type == amd_iommu_target_ivhd_type) {
540 int ret = find_last_devid_from_ivhd(h);
552 /****************************************************************************
554 * The following functions belong to the code path which parses the ACPI table
555 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
556 * data structures, initialize the device/alias/rlookup tables and also
557 * do the basic hardware initialization.
559 ****************************************************************************/
562 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
563 * write commands to that buffer later and the IOMMU will execute them
564 * asynchronously.
566 static int __init alloc_command_buffer(struct amd_iommu *iommu)
568 iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
569 get_order(CMD_BUFFER_SIZE));
571 return iommu->cmd_buf ? 0 : -ENOMEM;
575 * This function resets the command buffer if the IOMMU stopped fetching
576 * commands from it.
578 void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
580 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
582 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
583 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
585 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
589 * This function writes the command buffer address to the hardware and
590 * enables it.
592 static void iommu_enable_command_buffer(struct amd_iommu *iommu)
596 BUG_ON(iommu->cmd_buf == NULL);
598 entry = (u64)virt_to_phys(iommu->cmd_buf);
599 entry |= MMIO_CMD_SIZE_512;
601 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
602 &entry, sizeof(entry));
604 amd_iommu_reset_cmd_buffer(iommu);
607 static void __init free_command_buffer(struct amd_iommu *iommu)
609 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
612 /* allocates the memory where the IOMMU will log its events to */
613 static int __init alloc_event_buffer(struct amd_iommu *iommu)
615 iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
616 get_order(EVT_BUFFER_SIZE));
618 return iommu->evt_buf ? 0 : -ENOMEM;
621 static void iommu_enable_event_buffer(struct amd_iommu *iommu)
625 BUG_ON(iommu->evt_buf == NULL);
627 entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
629 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
630 &entry, sizeof(entry));
632 /* set head and tail to zero manually */
633 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
634 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
636 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
639 static void __init free_event_buffer(struct amd_iommu *iommu)
641 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
644 /* allocates the memory where the IOMMU will place the PPR log entries */
645 static int __init alloc_ppr_log(struct amd_iommu *iommu)
647 iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
648 get_order(PPR_LOG_SIZE));
650 return iommu->ppr_log ? 0 : -ENOMEM;
653 static void iommu_enable_ppr_log(struct amd_iommu *iommu)
657 if (iommu->ppr_log == NULL)
660 entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
662 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
663 &entry, sizeof(entry));
665 /* set head and tail to zero manually */
666 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
667 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
669 iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
670 iommu_feature_enable(iommu, CONTROL_PPR_EN);
673 static void __init free_ppr_log(struct amd_iommu *iommu)
675 if (iommu->ppr_log == NULL)
678 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
681 static void free_ga_log(struct amd_iommu *iommu)
683 #ifdef CONFIG_IRQ_REMAP
685 free_pages((unsigned long)iommu->ga_log,
686 get_order(GA_LOG_SIZE));
687 if (iommu->ga_log_tail)
688 free_pages((unsigned long)iommu->ga_log_tail,
693 static int iommu_ga_log_enable(struct amd_iommu *iommu)
695 #ifdef CONFIG_IRQ_REMAP
701 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
703 /* Check if already running */
704 if (status & (MMIO_STATUS_GALOG_RUN_MASK))
707 iommu_feature_enable(iommu, CONTROL_GAINT_EN);
708 iommu_feature_enable(iommu, CONTROL_GALOG_EN);
710 for (i = 0; i < LOOP_TIMEOUT; ++i) {
711 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
712 if (status & (MMIO_STATUS_GALOG_RUN_MASK))
716 if (i >= LOOP_TIMEOUT)
718 #endif /* CONFIG_IRQ_REMAP */
722 #ifdef CONFIG_IRQ_REMAP
723 static int iommu_init_ga_log(struct amd_iommu *iommu)
727 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
730 iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
731 get_order(GA_LOG_SIZE));
735 iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
737 if (!iommu->ga_log_tail)
740 entry = (u64)virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
741 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
742 &entry, sizeof(entry));
743 entry = ((u64)virt_to_phys(iommu->ga_log_tail) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
744 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
745 &entry, sizeof(entry));
746 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
747 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
754 #endif /* CONFIG_IRQ_REMAP */
756 static int iommu_init_ga(struct amd_iommu *iommu)
760 #ifdef CONFIG_IRQ_REMAP
761 /* Note: We have already checked GASup from IVRS table.
762 * Now, we need to make sure that GAMSup is set.
764 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
765 !iommu_feature(iommu, FEATURE_GAM_VAPIC))
766 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
768 ret = iommu_init_ga_log(iommu);
769 #endif /* CONFIG_IRQ_REMAP */
774 static void iommu_enable_gt(struct amd_iommu *iommu)
776 if (!iommu_feature(iommu, FEATURE_GT))
779 iommu_feature_enable(iommu, CONTROL_GT_EN);
782 /* sets a specific bit in the device table entry. */
783 static void set_dev_entry_bit(u16 devid, u8 bit)
785 int i = (bit >> 6) & 0x03;
786 int _bit = bit & 0x3f;
788 amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
791 static int get_dev_entry_bit(u16 devid, u8 bit)
793 int i = (bit >> 6) & 0x03;
794 int _bit = bit & 0x3f;
796 return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
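/*
 * Worked example: a bit index of 0x62 (the DEV_ENTRY_SYSMGT2 value from
 * amd_iommu_types.h) lands in quadword data[0x62 >> 6] == data[1] as bit
 * 0x62 & 0x3f == 34, i.e. the 256-bit DTE is addressed as four u64s.
 */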
800 void amd_iommu_apply_erratum_63(u16 devid)
804 sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
805 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
808 set_dev_entry_bit(devid, DEV_ENTRY_IW);
811 /* Writes the specific IOMMU for a device into the rlookup table */
812 static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
814 amd_iommu_rlookup_table[devid] = iommu;
818 * This function takes the device specific flags read from the ACPI
819 * table and sets up the device table entry with that information
821 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
822 u16 devid, u32 flags, u32 ext_flags)
824 if (flags & ACPI_DEVFLAG_INITPASS)
825 set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
826 if (flags & ACPI_DEVFLAG_EXTINT)
827 set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
828 if (flags & ACPI_DEVFLAG_NMI)
829 set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
830 if (flags & ACPI_DEVFLAG_SYSMGT1)
831 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
832 if (flags & ACPI_DEVFLAG_SYSMGT2)
833 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
834 if (flags & ACPI_DEVFLAG_LINT0)
835 set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
836 if (flags & ACPI_DEVFLAG_LINT1)
837 set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
839 amd_iommu_apply_erratum_63(devid);
841 set_iommu_for_device(iommu, devid);
844 static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
846 struct devid_map *entry;
847 struct list_head *list;
849 if (type == IVHD_SPECIAL_IOAPIC)
851 else if (type == IVHD_SPECIAL_HPET)
856 list_for_each_entry(entry, list, list) {
857 if (!(entry->id == id && entry->cmd_line))
860 pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
861 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
863 *devid = entry->devid;
868 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
873 entry->devid = *devid;
874 entry->cmd_line = cmd_line;
876 list_add_tail(&entry->list, list);
881 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
884 struct acpihid_map_entry *entry;
885 struct list_head *list = &acpihid_map;
887 list_for_each_entry(entry, list, list) {
888 if (strcmp(entry->hid, hid) ||
889 (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
893 pr_info("AMD-Vi: Command-line override for hid:%s uid:%s\n",
895 *devid = entry->devid;
899 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
903 memcpy(entry->uid, uid, strlen(uid));
904 memcpy(entry->hid, hid, strlen(hid));
905 entry->devid = *devid;
906 entry->cmd_line = cmd_line;
907 entry->root_devid = (entry->devid & (~0x7));
909 pr_info("AMD-Vi: %s, add hid:%s, uid:%s, rdevid:%d\n",
910 entry->cmd_line ? "cmd" : "ivrs",
911 entry->hid, entry->uid, entry->root_devid);
913 list_add_tail(&entry->list, list);
917 static int __init add_early_maps(void)
921 for (i = 0; i < early_ioapic_map_size; ++i) {
922 ret = add_special_device(IVHD_SPECIAL_IOAPIC,
923 early_ioapic_map[i].id,
924 &early_ioapic_map[i].devid,
925 early_ioapic_map[i].cmd_line);
930 for (i = 0; i < early_hpet_map_size; ++i) {
931 ret = add_special_device(IVHD_SPECIAL_HPET,
932 early_hpet_map[i].id,
933 &early_hpet_map[i].devid,
934 early_hpet_map[i].cmd_line);
939 for (i = 0; i < early_acpihid_map_size; ++i) {
940 ret = add_acpi_hid_device(early_acpihid_map[i].hid,
941 early_acpihid_map[i].uid,
942 &early_acpihid_map[i].devid,
943 early_acpihid_map[i].cmd_line);
952 * Reads the device exclusion range from ACPI and initializes the IOMMU with
953 * it.
955 static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
957 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
959 if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
964 * We can only configure exclusion ranges per IOMMU, not
965 * per device. But we can enable the exclusion range per
966 * device. This is done here.
968 set_dev_entry_bit(devid, DEV_ENTRY_EX);
969 iommu->exclusion_start = m->range_start;
970 iommu->exclusion_length = m->range_length;
975 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
976 * initializes the hardware and our data structures with it.
978 static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
979 struct ivhd_header *h)
982 u8 *end = p, flags = 0;
983 u16 devid = 0, devid_start = 0, devid_to = 0;
984 u32 dev_i, ext_flags = 0;
986 struct ivhd_entry *e;
991 ret = add_early_maps();
996 * First save the recommended feature enable bits from ACPI
998 iommu->acpi_flags = h->flags;
1001 * Done. Now parse the device entries
1003 ivhd_size = get_ivhd_header_size(h);
1005 pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
1015 e = (struct ivhd_entry *)p;
1019 DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
1021 for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
1022 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
1024 case IVHD_DEV_SELECT:
1026 DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
1028 PCI_BUS_NUM(e->devid),
1034 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1036 case IVHD_DEV_SELECT_RANGE_START:
1038 DUMP_printk(" DEV_SELECT_RANGE_START\t "
1039 "devid: %02x:%02x.%x flags: %02x\n",
1040 PCI_BUS_NUM(e->devid),
1045 devid_start = e->devid;
1050 case IVHD_DEV_ALIAS:
1052 DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
1053 "flags: %02x devid_to: %02x:%02x.%x\n",
1054 PCI_BUS_NUM(e->devid),
1058 PCI_BUS_NUM(e->ext >> 8),
1059 PCI_SLOT(e->ext >> 8),
1060 PCI_FUNC(e->ext >> 8));
1063 devid_to = e->ext >> 8;
1064 set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
1065 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
1066 amd_iommu_alias_table[devid] = devid_to;
1068 case IVHD_DEV_ALIAS_RANGE:
1070 DUMP_printk(" DEV_ALIAS_RANGE\t\t "
1071 "devid: %02x:%02x.%x flags: %02x "
1072 "devid_to: %02x:%02x.%x\n",
1073 PCI_BUS_NUM(e->devid),
1077 PCI_BUS_NUM(e->ext >> 8),
1078 PCI_SLOT(e->ext >> 8),
1079 PCI_FUNC(e->ext >> 8));
1081 devid_start = e->devid;
1083 devid_to = e->ext >> 8;
1087 case IVHD_DEV_EXT_SELECT:
1089 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
1090 "flags: %02x ext: %08x\n",
1091 PCI_BUS_NUM(e->devid),
1097 set_dev_entry_from_acpi(iommu, devid, e->flags,
1100 case IVHD_DEV_EXT_SELECT_RANGE:
1102 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
1103 "%02x:%02x.%x flags: %02x ext: %08x\n",
1104 PCI_BUS_NUM(e->devid),
1109 devid_start = e->devid;
1114 case IVHD_DEV_RANGE_END:
1116 DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
1117 PCI_BUS_NUM(e->devid),
1119 PCI_FUNC(e->devid));
1122 for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
1124 amd_iommu_alias_table[dev_i] = devid_to;
1125 set_dev_entry_from_acpi(iommu,
1126 devid_to, flags, ext_flags);
1128 set_dev_entry_from_acpi(iommu, dev_i,
1132 case IVHD_DEV_SPECIAL: {
1138 handle = e->ext & 0xff;
1139 devid = (e->ext >> 8) & 0xffff;
1140 type = (e->ext >> 24) & 0xff;
1142 if (type == IVHD_SPECIAL_IOAPIC)
1144 else if (type == IVHD_SPECIAL_HPET)
1149 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
1155 ret = add_special_device(type, handle, &devid, false);
1160 * add_special_device might update the devid in case a
1161 * command-line override is present. So call
1162 * set_dev_entry_from_acpi after add_special_device.
1164 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1168 case IVHD_DEV_ACPI_HID: {
1170 u8 hid[ACPIHID_HID_LEN] = {0};
1171 u8 uid[ACPIHID_UID_LEN] = {0};
1174 if (h->type != 0x40) {
1175 pr_err(FW_BUG "Invalid IVHD device type %#x\n",
1180 memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
1181 hid[ACPIHID_HID_LEN - 1] = '\0';
1184 pr_err(FW_BUG "Invalid HID.\n");
1189 case UID_NOT_PRESENT:
1192 pr_warn(FW_BUG "Invalid UID length.\n");
1195 case UID_IS_INTEGER:
1197 sprintf(uid, "%d", e->uid);
1200 case UID_IS_CHARACTER:
1202 memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
1203 uid[ACPIHID_UID_LEN - 1] = '\0';
1211 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
1219 ret = add_acpi_hid_device(hid, uid, &devid, false);
1224 * add_acpi_hid_device might update the devid in case a
1225 * command-line override is present. So call
1226 * set_dev_entry_from_acpi after add_acpi_hid_device.
1228 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1236 p += ivhd_entry_length(p);
1242 static void __init free_iommu_one(struct amd_iommu *iommu)
1244 free_command_buffer(iommu);
1245 free_event_buffer(iommu);
1246 free_ppr_log(iommu);
1248 iommu_unmap_mmio_space(iommu);
1251 static void __init free_iommu_all(void)
1253 struct amd_iommu *iommu, *next;
1255 for_each_iommu_safe(iommu, next) {
1256 list_del(&iommu->list);
1257 free_iommu_one(iommu);
1263 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1265 * BIOS should disable L2B miscellaneous clock gating by setting
1266 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
1268 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1272 if ((boot_cpu_data.x86 != 0x15) ||
1273 (boot_cpu_data.x86_model < 0x10) ||
1274 (boot_cpu_data.x86_model > 0x1f))
1277 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1278 pci_read_config_dword(iommu->dev, 0xf4, &value);
1283 /* Select NB indirect register 0x90 and enable writing */
1284 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1286 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
1287 pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
1288 dev_name(&iommu->dev->dev));
1290 /* Clear the enable writing bit */
1291 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1295 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1297 * BIOS should enable ATS write permission check by setting
1298 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1300 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1304 if ((boot_cpu_data.x86 != 0x15) ||
1305 (boot_cpu_data.x86_model < 0x30) ||
1306 (boot_cpu_data.x86_model > 0x3f))
1309 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1310 value = iommu_read_l2(iommu, 0x47);
1315 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1316 iommu_write_l2(iommu, 0x47, value | BIT(0));
1318 pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
1319 dev_name(&iommu->dev->dev));
1323 * This function glues the initialization function for one IOMMU
1324 * together and also allocates the command buffer and programs the
1325 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1327 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1331 spin_lock_init(&iommu->lock);
1333 /* Add IOMMU to internal data structures */
1334 list_add_tail(&iommu->list, &amd_iommu_list);
1335 iommu->index = amd_iommus_present++;
1337 if (unlikely(iommu->index >= MAX_IOMMUS)) {
1338 WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
1342 /* Index is fine - add IOMMU to the array */
1343 amd_iommus[iommu->index] = iommu;
1346 * Copy data from ACPI table entry to the iommu struct
1348 iommu->devid = h->devid;
1349 iommu->cap_ptr = h->cap_ptr;
1350 iommu->pci_seg = h->pci_seg;
1351 iommu->mmio_phys = h->mmio_phys;
1355 /* Check if IVHD EFR contains proper max banks/counters */
1356 if ((h->efr_attr != 0) &&
1357 ((h->efr_attr & (0xF << 13)) != 0) &&
1358 ((h->efr_attr & (0x3F << 17)) != 0))
1359 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1361 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1362 if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
1363 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1367 if (h->efr_reg & (1 << 9))
1368 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1370 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1371 if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
1372 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1378 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1379 iommu->mmio_phys_end);
1380 if (!iommu->mmio_base)
1383 if (alloc_command_buffer(iommu))
1386 if (alloc_event_buffer(iommu))
1389 iommu->int_enabled = false;
1391 ret = init_iommu_from_acpi(iommu, h);
1395 ret = amd_iommu_create_irq_domain(iommu);
1400 * Make sure IOMMU is not considered to translate itself. The IVRS
1401 * table tells us so, but this is a lie!
1403 amd_iommu_rlookup_table[iommu->devid] = NULL;
1409 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1410 * @ivrs: Pointer to the IVRS header
1412 * This function searches through all IVHDs and returns the highest supported IVHD type
1414 static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1416 u8 *base = (u8 *)ivrs;
1417 struct ivhd_header *ivhd = (struct ivhd_header *)
1418 (base + IVRS_HEADER_LENGTH);
1419 u8 last_type = ivhd->type;
1420 u16 devid = ivhd->devid;
1422 while (((u8 *)ivhd - base < ivrs->length) &&
1423 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1424 u8 *p = (u8 *) ivhd;
1426 if (ivhd->devid == devid)
1427 last_type = ivhd->type;
1428 ivhd = (struct ivhd_header *)(p + ivhd->length);
1435 * Iterates over all IOMMU entries in the ACPI table, allocates the
1436 * IOMMU structure and initializes it with init_iommu_one()
1438 static int __init init_iommu_all(struct acpi_table_header *table)
1440 u8 *p = (u8 *)table, *end = (u8 *)table;
1441 struct ivhd_header *h;
1442 struct amd_iommu *iommu;
1445 end += table->length;
1446 p += IVRS_HEADER_LENGTH;
1449 h = (struct ivhd_header *)p;
1450 if (*p == amd_iommu_target_ivhd_type) {
1452 DUMP_printk("device: %02x:%02x.%01x cap: %04x "
1453 "seg: %d flags: %01x info %04x\n",
1454 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
1455 PCI_FUNC(h->devid), h->cap_ptr,
1456 h->pci_seg, h->flags, h->info);
1457 DUMP_printk(" mmio-addr: %016llx\n",
1460 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
1464 ret = init_iommu_one(iommu, h);
1477 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1479 u64 val = 0xabcd, val2 = 0;
1481 if (!iommu_feature(iommu, FEATURE_PC))
1484 amd_iommu_pc_present = true;
1486 /* Check if the performance counters can be written to */
1487 if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
1488 (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
1490 pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
1491 amd_iommu_pc_present = false;
1495 pr_info("AMD-Vi: IOMMU performance counters supported\n");
1497 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1498 iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1499 iommu->max_counters = (u8) ((val >> 7) & 0xf);
1502 static ssize_t amd_iommu_show_cap(struct device *dev,
1503 struct device_attribute *attr,
1506 struct amd_iommu *iommu = dev_get_drvdata(dev);
1507 return sprintf(buf, "%x\n", iommu->cap);
1509 static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1511 static ssize_t amd_iommu_show_features(struct device *dev,
1512 struct device_attribute *attr,
1515 struct amd_iommu *iommu = dev_get_drvdata(dev);
1516 return sprintf(buf, "%llx\n", iommu->features);
1518 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1520 static struct attribute *amd_iommu_attrs[] = {
1522 &dev_attr_features.attr,
1526 static struct attribute_group amd_iommu_group = {
1527 .name = "amd-iommu",
1528 .attrs = amd_iommu_attrs,
1531 static const struct attribute_group *amd_iommu_groups[] = {
1536 static int iommu_init_pci(struct amd_iommu *iommu)
1538 int cap_ptr = iommu->cap_ptr;
1539 u32 range, misc, low, high;
1542 iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid),
1543 iommu->devid & 0xff);
1547 /* Prevent binding other PCI device drivers to IOMMU devices */
1548 iommu->dev->match_driver = false;
1550 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
1552 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
1554 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
1557 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
1558 amd_iommu_iotlb_sup = false;
1560 /* read extended feature bits */
1561 low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
1562 high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
1564 iommu->features = ((u64)high << 32) | low;
1566 if (iommu_feature(iommu, FEATURE_GT)) {
1571 pasmax = iommu->features & FEATURE_PASID_MASK;
1572 pasmax >>= FEATURE_PASID_SHIFT;
1573 max_pasid = (1 << (pasmax + 1)) - 1;
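/*
 * Worked example: a PASmax field of 15 allows 16-bit PASIDs,
 * i.e. max_pasid == (1 << 16) - 1 == 0xffff.
 */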
1575 amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
1577 BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
1579 glxval = iommu->features & FEATURE_GLXVAL_MASK;
1580 glxval >>= FEATURE_GLXVAL_SHIFT;
1582 if (amd_iommu_max_glx_val == -1)
1583 amd_iommu_max_glx_val = glxval;
1585 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
1588 if (iommu_feature(iommu, FEATURE_GT) &&
1589 iommu_feature(iommu, FEATURE_PPR)) {
1590 iommu->is_iommu_v2 = true;
1591 amd_iommu_v2_present = true;
1594 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
1597 ret = iommu_init_ga(iommu);
1601 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
1602 amd_iommu_np_cache = true;
1604 init_iommu_perf_ctr(iommu);
1606 if (is_rd890_iommu(iommu->dev)) {
1609 iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
1613 * Some rd890 systems may not be fully reconfigured by the
1614 * BIOS, so it's necessary for us to store this information so
1615 * it can be reprogrammed on resume
1617 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
1618 &iommu->stored_addr_lo);
1619 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
1620 &iommu->stored_addr_hi);
1622 /* Low bit locks writes to configuration space */
1623 iommu->stored_addr_lo &= ~1;
1625 for (i = 0; i < 6; i++)
1626 for (j = 0; j < 0x12; j++)
1627 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
1629 for (i = 0; i < 0x83; i++)
1630 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
1633 amd_iommu_erratum_746_workaround(iommu);
1634 amd_iommu_ats_write_check_workaround(iommu);
1636 iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
1637 amd_iommu_groups, "ivhd%d",
1640 return pci_enable_device(iommu->dev);
1643 static void print_iommu_info(void)
1645 static const char * const feat_str[] = {
1646 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
1647 "IA", "GA", "HE", "PC"
1649 struct amd_iommu *iommu;
1651 for_each_iommu(iommu) {
1654 pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
1655 dev_name(&iommu->dev->dev), iommu->cap_ptr);
1657 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
1658 pr_info("AMD-Vi: Extended features (%#llx):\n",
1660 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
1661 if (iommu_feature(iommu, (1ULL << i)))
1662 pr_cont(" %s", feat_str[i]);
1665 if (iommu->features & FEATURE_GAM_VAPIC)
1666 pr_cont(" GA_vAPIC");
1671 if (irq_remapping_enabled) {
1672 pr_info("AMD-Vi: Interrupt remapping enabled\n");
1673 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
1674 pr_info("AMD-Vi: virtual APIC enabled\n");
1678 static int __init amd_iommu_init_pci(void)
1680 struct amd_iommu *iommu;
1683 for_each_iommu(iommu) {
1684 ret = iommu_init_pci(iommu);
1690 * Order is important here to make sure any unity map requirements are
1691 * fulfilled. The unity mappings are created and written to the device
1692 * table during the amd_iommu_init_api() call.
1694 * After that we call init_device_table_dma() to make sure any
1695 * uninitialized DTE will block DMA, and in the end we flush the caches
1696 * of all IOMMUs to make sure the changes to the device table are
1699 ret = amd_iommu_init_api();
1701 init_device_table_dma();
1703 for_each_iommu(iommu)
1704 iommu_flush_all_caches(iommu);
1712 /****************************************************************************
1714 * The following functions initialize the MSI interrupts for all IOMMUs
1715 * in the system. It's a bit challenging because there could be multiple
1716 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
1719 ****************************************************************************/
1721 static int iommu_setup_msi(struct amd_iommu *iommu)
1725 r = pci_enable_msi(iommu->dev);
1729 r = request_threaded_irq(iommu->dev->irq,
1730 amd_iommu_int_handler,
1731 amd_iommu_int_thread,
1736 pci_disable_msi(iommu->dev);
1740 iommu->int_enabled = true;
1745 static int iommu_init_msi(struct amd_iommu *iommu)
1749 if (iommu->int_enabled)
1752 if (iommu->dev->msi_cap)
1753 ret = iommu_setup_msi(iommu);
1761 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
1763 if (iommu->ppr_log != NULL)
1764 iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
1766 iommu_ga_log_enable(iommu);
1771 /****************************************************************************
1773 * The next functions belong to the third pass of parsing the ACPI
1774 * table. In this last pass the memory mapping requirements are
1775 * gathered (like exclusion and unity mapping ranges).
1777 ****************************************************************************/
1779 static void __init free_unity_maps(void)
1781 struct unity_map_entry *entry, *next;
1783 list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
1784 list_del(&entry->list);
1789 /* called when we find an exclusion range definition in ACPI */
1790 static int __init init_exclusion_range(struct ivmd_header *m)
1795 case ACPI_IVMD_TYPE:
1796 set_device_exclusion_range(m->devid, m);
1798 case ACPI_IVMD_TYPE_ALL:
1799 for (i = 0; i <= amd_iommu_last_bdf; ++i)
1800 set_device_exclusion_range(i, m);
1802 case ACPI_IVMD_TYPE_RANGE:
1803 for (i = m->devid; i <= m->aux; ++i)
1804 set_device_exclusion_range(i, m);
1813 /* called for unity map ACPI definition */
1814 static int __init init_unity_map_range(struct ivmd_header *m)
1816 struct unity_map_entry *e = NULL;
1819 e = kzalloc(sizeof(*e), GFP_KERNEL);
1827 case ACPI_IVMD_TYPE:
1828 s = "IVMD_TYPE\t\t\t";
1829 e->devid_start = e->devid_end = m->devid;
1831 case ACPI_IVMD_TYPE_ALL:
1832 s = "IVMD_TYPE_ALL\t\t";
1834 e->devid_end = amd_iommu_last_bdf;
1836 case ACPI_IVMD_TYPE_RANGE:
1837 s = "IVMD_TYPE_RANGE\t\t";
1838 e->devid_start = m->devid;
1839 e->devid_end = m->aux;
1842 e->address_start = PAGE_ALIGN(m->range_start);
1843 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
1844 e->prot = m->flags >> 1;
1846 DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
1847 " range_start: %016llx range_end: %016llx flags: %x\n", s,
1848 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
1849 PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
1850 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
1851 e->address_start, e->address_end, m->flags);
1853 list_add_tail(&e->list, &amd_iommu_unity_map);
1858 /* iterates over all memory definitions we find in the ACPI table */
1859 static int __init init_memory_definitions(struct acpi_table_header *table)
1861 u8 *p = (u8 *)table, *end = (u8 *)table;
1862 struct ivmd_header *m;
1864 end += table->length;
1865 p += IVRS_HEADER_LENGTH;
1868 m = (struct ivmd_header *)p;
1869 if (m->flags & IVMD_FLAG_EXCL_RANGE)
1870 init_exclusion_range(m);
1871 else if (m->flags & IVMD_FLAG_UNITY_MAP)
1872 init_unity_map_range(m);
1881 * Init the device table to not allow DMA access for devices and
1882 * suppress all page faults
1884 static void init_device_table_dma(void)
1888 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
1889 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
1890 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
1894 static void __init uninit_device_table_dma(void)
1898 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
1899 amd_iommu_dev_table[devid].data[0] = 0ULL;
1900 amd_iommu_dev_table[devid].data[1] = 0ULL;
1904 static void init_device_table(void)
1908 if (!amd_iommu_irq_remap)
1911 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
1912 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
1915 static void iommu_init_flags(struct amd_iommu *iommu)
1917 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
1918 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
1919 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
1921 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
1922 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
1923 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
1925 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
1926 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
1927 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
1929 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
1930 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
1931 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
1934 * make IOMMU memory accesses cache coherent
1936 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
1938 /* Set IOTLB invalidation timeout to 1s */
1939 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
1942 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
1945 u32 ioc_feature_control;
1946 struct pci_dev *pdev = iommu->root_pdev;
1948 /* RD890 BIOSes may not have completely reconfigured the iommu */
1949 if (!is_rd890_iommu(iommu->dev) || !pdev)
1953 * First, we need to ensure that the iommu is enabled. This is
1954 * controlled by a register in the northbridge
1957 /* Select Northbridge indirect register 0x75 and enable writing */
1958 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
1959 pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
1961 /* Enable the iommu */
1962 if (!(ioc_feature_control & 0x1))
1963 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
1965 /* Restore the iommu BAR */
1966 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
1967 iommu->stored_addr_lo);
1968 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
1969 iommu->stored_addr_hi);
1971 /* Restore the l1 indirect regs for each of the 6 l1s */
1972 for (i = 0; i < 6; i++)
1973 for (j = 0; j < 0x12; j++)
1974 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
1976 /* Restore the l2 indirect regs */
1977 for (i = 0; i < 0x83; i++)
1978 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
1980 /* Lock PCI setup registers */
1981 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
1982 iommu->stored_addr_lo | 1);
1985 static void iommu_enable_ga(struct amd_iommu *iommu)
1987 #ifdef CONFIG_IRQ_REMAP
1988 switch (amd_iommu_guest_ir) {
1989 case AMD_IOMMU_GUEST_IR_VAPIC:
1990 iommu_feature_enable(iommu, CONTROL_GAM_EN);
1992 case AMD_IOMMU_GUEST_IR_LEGACY_GA:
1993 iommu_feature_enable(iommu, CONTROL_GA_EN);
1994 iommu->irte_ops = &irte_128_ops;
1997 iommu->irte_ops = &irte_32_ops;
2004 * This function finally enables all IOMMUs found in the system after
2005 * they have been initialized
2007 static void early_enable_iommus(void)
2009 struct amd_iommu *iommu;
2011 for_each_iommu(iommu) {
2012 iommu_disable(iommu);
2013 iommu_init_flags(iommu);
2014 iommu_set_device_table(iommu);
2015 iommu_enable_command_buffer(iommu);
2016 iommu_enable_event_buffer(iommu);
2017 iommu_set_exclusion_range(iommu);
2018 iommu_enable_ga(iommu);
2019 iommu_enable(iommu);
2020 iommu_flush_all_caches(iommu);
2023 #ifdef CONFIG_IRQ_REMAP
2024 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2025 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2029 static void enable_iommus_v2(void)
2031 struct amd_iommu *iommu;
2033 for_each_iommu(iommu) {
2034 iommu_enable_ppr_log(iommu);
2035 iommu_enable_gt(iommu);
2039 static void enable_iommus(void)
2041 early_enable_iommus();
2046 static void disable_iommus(void)
2048 struct amd_iommu *iommu;
2050 for_each_iommu(iommu)
2051 iommu_disable(iommu);
2053 #ifdef CONFIG_IRQ_REMAP
2054 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2055 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2060 * Suspend/Resume support
2061 * disable suspend until real resume implemented
2064 static void amd_iommu_resume(void)
2066 struct amd_iommu *iommu;
2068 for_each_iommu(iommu)
2069 iommu_apply_resume_quirks(iommu);
2071 /* re-load the hardware */
2074 amd_iommu_enable_interrupts();
2077 static int amd_iommu_suspend(void)
2079 /* disable IOMMUs to go out of the way for BIOS */
2085 static struct syscore_ops amd_iommu_syscore_ops = {
2086 .suspend = amd_iommu_suspend,
2087 .resume = amd_iommu_resume,
2090 static void __init free_on_init_error(void)
2092 free_pages((unsigned long)irq_lookup_table,
2093 get_order(rlookup_table_size));
2095 kmem_cache_destroy(amd_iommu_irq_cache);
2096 amd_iommu_irq_cache = NULL;
2098 free_pages((unsigned long)amd_iommu_rlookup_table,
2099 get_order(rlookup_table_size));
2101 free_pages((unsigned long)amd_iommu_alias_table,
2102 get_order(alias_table_size));
2104 free_pages((unsigned long)amd_iommu_dev_table,
2105 get_order(dev_table_size));
2109 #ifdef CONFIG_GART_IOMMU
2111 * We failed to initialize the AMD IOMMU - try fallback to GART
2119 /* SB IOAPIC is always on this device in AMD systems */
2120 #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
2122 static bool __init check_ioapic_information(void)
2124 const char *fw_bug = FW_BUG;
2125 bool ret, has_sb_ioapic;
2128 has_sb_ioapic = false;
2132 * If we have map overrides on the kernel command line the
2133 * messages in this function might not describe firmware bugs
2134 * anymore - so be careful
2139 for (idx = 0; idx < nr_ioapics; idx++) {
2140 int devid, id = mpc_ioapic_id(idx);
2142 devid = get_ioapic_devid(id);
2144 pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
2147 } else if (devid == IOAPIC_SB_DEVID) {
2148 has_sb_ioapic = true;
2153 if (!has_sb_ioapic) {
2155 * We expect the SB IOAPIC to be listed in the IVRS
2156 * table. The system timer is connected to the SB IOAPIC
2157 * and if we don't have it in the list the system will
2158 * panic at boot time. This situation usually happens
2159 * when the BIOS is buggy and provides us the wrong
2160 * device id for the IOAPIC in the system.
2162 pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
2166 pr_err("AMD-Vi: Disabling interrupt remapping\n");
2171 static void __init free_dma_resources(void)
2173 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
2174 get_order(MAX_DOMAIN_ID/8));
2180 * This is the hardware init function for AMD IOMMU in the system.
2181 * This function is called either from amd_iommu_init or from the interrupt
2182 * remapping setup code.
2184 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
2187 * 1 pass) Discover the most comprehensive IVHD type to use.
2189 * 2 pass) Find the highest PCI device id the driver has to handle.
2190 * Based on this information the sizes of the data
2191 * structures that need to be allocated are determined.
2193 * 3 pass) Initialize the data structures just allocated with the
2194 * information in the ACPI table about available AMD IOMMUs
2195 * in the system. It also maps the PCI devices in the
2196 * system to specific IOMMUs
2198 * 4 pass) After the basic data structures are allocated and
2199 * initialized we update them with information about memory
2200 * remapping requirements parsed out of the ACPI table in
2203 * After everything is set up the IOMMUs are enabled and the necessary
2204 * hotplug and suspend notifiers are registered.
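 *
 * For orientation, the passes above roughly map to functions in this
 * file: get_highest_supported_ivhd_type() (pass 1), find_last_devid_acpi()
 * (pass 2), init_iommu_all() (pass 3) and init_memory_definitions()
 * (pass 4), all driven from early_amd_iommu_init() below.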
2206 static int __init early_amd_iommu_init(void)
2208 struct acpi_table_header *ivrs_base;
2209 acpi_size ivrs_size;
2211 int i, remap_cache_sz, ret = 0;
2213 if (!amd_iommu_detected)
2216 status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
2217 if (status == AE_NOT_FOUND)
2219 else if (ACPI_FAILURE(status)) {
2220 const char *err = acpi_format_exception(status);
2221 pr_err("AMD-Vi: IVRS table error: %s\n", err);
2226 * Validate checksum here so we don't need to do it when
2227 * we actually parse the table
2229 ret = check_ivrs_checksum(ivrs_base);
2233 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2234 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2237 * First parse ACPI tables to find the largest Bus/Dev/Func
2238 * we need to handle. Based on this information the shared data
2239 * structures for the IOMMUs in the system will be allocated.
2241 ret = find_last_devid_acpi(ivrs_base);
2245 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
2246 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
2247 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
2249 /* Device table - directly used by all IOMMUs */
2251 amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2252 get_order(dev_table_size));
2253 if (amd_iommu_dev_table == NULL)
2257 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
2258 * IOMMUs see for that device
2260 amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
2261 get_order(alias_table_size));
2262 if (amd_iommu_alias_table == NULL)
2265 /* IOMMU rlookup table - find the IOMMU for a specific device */
2266 amd_iommu_rlookup_table = (void *)__get_free_pages(
2267 GFP_KERNEL | __GFP_ZERO,
2268 get_order(rlookup_table_size));
2269 if (amd_iommu_rlookup_table == NULL)
2272 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
2273 GFP_KERNEL | __GFP_ZERO,
2274 get_order(MAX_DOMAIN_ID/8));
2275 if (amd_iommu_pd_alloc_bitmap == NULL)
2279 * let all alias entries point to themselves
2281 for (i = 0; i <= amd_iommu_last_bdf; ++i)
2282 amd_iommu_alias_table[i] = i;
2285 * never allocate domain 0 because it's used as the non-allocated and
2286 * error value placeholder.
2288 amd_iommu_pd_alloc_bitmap[0] = 1;
2290 spin_lock_init(&amd_iommu_pd_lock);
2293 * now that the data structures are allocated and basically initialized,
2294 * start the real ACPI table scan
2296 ret = init_iommu_all(ivrs_base);
2300 if (amd_iommu_irq_remap)
2301 amd_iommu_irq_remap = check_ioapic_information();
2303 if (amd_iommu_irq_remap) {
2305 * Interrupt remapping enabled, create kmem_cache for the
2306 * remapping tables.
2309 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2310 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
2312 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
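/*
 * Worked example, assuming MAX_IRQS_PER_TABLE == 256: the legacy
 * 32-bit IRTE format needs a 1 KiB table per device, the 128-bit
 * GA format a 4 KiB one.
 */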
2313 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
2315 IRQ_TABLE_ALIGNMENT,
2317 if (!amd_iommu_irq_cache)
2320 irq_lookup_table = (void *)__get_free_pages(
2321 GFP_KERNEL | __GFP_ZERO,
2322 get_order(rlookup_table_size));
2323 if (!irq_lookup_table)
2327 ret = init_memory_definitions(ivrs_base);
2331 /* init the device table */
2332 init_device_table();
2335 /* Don't leak any ACPI memory */
2336 early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
2342 static int amd_iommu_enable_interrupts(void)
2344 struct amd_iommu *iommu;
2347 for_each_iommu(iommu) {
2348 ret = iommu_init_msi(iommu);
2357 static bool detect_ivrs(void)
2359 struct acpi_table_header *ivrs_base;
2360 acpi_size ivrs_size;
2363 status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
2364 if (status == AE_NOT_FOUND)
2366 else if (ACPI_FAILURE(status)) {
2367 const char *err = acpi_format_exception(status);
2368 pr_err("AMD-Vi: IVRS table error: %s\n", err);
2372 early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
2374 /* Make sure ACS will be enabled during PCI probe */
2380 /****************************************************************************
2382 * AMD IOMMU Initialization State Machine
2384 ****************************************************************************/
2386 static int __init state_next(void)
2390 switch (init_state) {
2391 case IOMMU_START_STATE:
2392 if (!detect_ivrs()) {
2393 init_state = IOMMU_NOT_FOUND;
2396 init_state = IOMMU_IVRS_DETECTED;
2399 case IOMMU_IVRS_DETECTED:
2400 ret = early_amd_iommu_init();
2401 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
2403 case IOMMU_ACPI_FINISHED:
2404 early_enable_iommus();
2405 register_syscore_ops(&amd_iommu_syscore_ops);
2406 x86_platform.iommu_shutdown = disable_iommus;
2407 init_state = IOMMU_ENABLED;
2410 ret = amd_iommu_init_pci();
2411 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
2414 case IOMMU_PCI_INIT:
2415 ret = amd_iommu_enable_interrupts();
2416 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2418 case IOMMU_INTERRUPTS_EN:
2419 ret = amd_iommu_init_dma_ops();
2420 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2423 init_state = IOMMU_INITIALIZED;
2425 case IOMMU_INITIALIZED:
2428 case IOMMU_NOT_FOUND:
2429 case IOMMU_INIT_ERROR:
2430 /* Error states => do nothing */
2441 static int __init iommu_go_to_state(enum iommu_init_state state)
2445 while (init_state != state) {
2447 if (init_state == IOMMU_NOT_FOUND ||
2448 init_state == IOMMU_INIT_ERROR)
2455 #ifdef CONFIG_IRQ_REMAP
2456 int __init amd_iommu_prepare(void)
2460 amd_iommu_irq_remap = true;
2462 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
2465 return amd_iommu_irq_remap ? 0 : -ENODEV;
2468 int __init amd_iommu_enable(void)
2472 ret = iommu_go_to_state(IOMMU_ENABLED);
2476 irq_remapping_enabled = 1;
2481 void amd_iommu_disable(void)
2483 amd_iommu_suspend();
2486 int amd_iommu_reenable(int mode)
2493 int __init amd_iommu_enable_faulting(void)
2495 /* We enable MSI later when PCI is initialized */
2501 * This is the core init function for AMD IOMMU hardware in the system.
2502 * This function is called from the generic x86 DMA layer initialization
2505 static int __init amd_iommu_init(void)
2509 ret = iommu_go_to_state(IOMMU_INITIALIZED);
2511 free_dma_resources();
2512 if (!irq_remapping_enabled) {
2514 free_on_init_error();
2516 struct amd_iommu *iommu;
2518 uninit_device_table_dma();
2519 for_each_iommu(iommu)
2520 iommu_flush_all_caches(iommu);
2527 /****************************************************************************
2529 * Early detect code. This code runs at IOMMU detection time in the DMA
2530 * layer. It just checks whether there is an IVRS ACPI table to detect
2531 * AMD IOMMUs.
2533 ****************************************************************************/
2534 int __init amd_iommu_detect(void)
2538 if (no_iommu || (iommu_detected && !gart_iommu_aperture))
2541 if (amd_iommu_disabled)
2544 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
2548 amd_iommu_detected = true;
2550 x86_init.iommu.iommu_init = amd_iommu_init;
2555 /****************************************************************************
2557 * Parsing functions for the AMD IOMMU specific kernel command line
2560 ****************************************************************************/
2562 static int __init parse_amd_iommu_dump(char *str)
2564 amd_iommu_dump = true;
2569 static int __init parse_amd_iommu_intr(char *str)
2571 for (; *str; ++str) {
2572 if (strncmp(str, "legacy", 6) == 0) {
2573 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
2576 if (strncmp(str, "vapic", 5) == 0) {
2577 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
2584 static int __init parse_amd_iommu_options(char *str)
2586 for (; *str; ++str) {
2587 if (strncmp(str, "fullflush", 9) == 0)
2588 amd_iommu_unmap_flush = true;
2589 if (strncmp(str, "off", 3) == 0)
2590 amd_iommu_disabled = true;
2591 if (strncmp(str, "force_isolation", 15) == 0)
2592 amd_iommu_force_isolation = true;
2598 static int __init parse_ivrs_ioapic(char *str)
2600 unsigned int bus, dev, fn;
2604 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2607 pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
2611 if (early_ioapic_map_size == EARLY_MAP_SIZE) {
2612 pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
2617 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
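/*
 * Worked example: ivrs_ioapic[32]=00:14.0 (id 32 chosen arbitrarily)
 * yields devid (0x00 << 8) | (0x14 << 3) | 0x0 == 0x00a0, i.e. PCI
 * device 00:14.0.
 */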
2619 cmdline_maps = true;
2620 i = early_ioapic_map_size++;
2621 early_ioapic_map[i].id = id;
2622 early_ioapic_map[i].devid = devid;
2623 early_ioapic_map[i].cmd_line = true;
2628 static int __init parse_ivrs_hpet(char *str)
2630 unsigned int bus, dev, fn;
2634 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2637 pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
2641 if (early_hpet_map_size == EARLY_MAP_SIZE) {
2642 pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
2647 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2649 cmdline_maps = true;
2650 i = early_hpet_map_size++;
2651 early_hpet_map[i].id = id;
2652 early_hpet_map[i].devid = devid;
2653 early_hpet_map[i].cmd_line = true;
2658 static int __init parse_ivrs_acpihid(char *str)
2661 char *hid, *uid, *p;
2662 char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
2665 ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
2667 pr_err("AMD-Vi: Invalid command line: ivrs_acpihid(%s)\n", str);
2672 hid = strsep(&p, ":");
2675 if (!hid || !(*hid) || !uid) {
2676 pr_err("AMD-Vi: Invalid command line: hid or uid\n");
2680 i = early_acpihid_map_size++;
2681 memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
2682 memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
2683 early_acpihid_map[i].devid =
2684 ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2685 early_acpihid_map[i].cmd_line = true;
2690 __setup("amd_iommu_dump", parse_amd_iommu_dump);
2691 __setup("amd_iommu=", parse_amd_iommu_options);
2692 __setup("amd_iommu_intr=", parse_amd_iommu_intr);
2693 __setup("ivrs_ioapic", parse_ivrs_ioapic);
2694 __setup("ivrs_hpet", parse_ivrs_hpet);
2695 __setup("ivrs_acpihid", parse_ivrs_acpihid);
2697 IOMMU_INIT_FINISH(amd_iommu_detect,
2698 gart_iommu_hole_init,
2702 bool amd_iommu_v2_supported(void)
2704 return amd_iommu_v2_present;
2706 EXPORT_SYMBOL(amd_iommu_v2_supported);
2708 /****************************************************************************
2710 * IOMMU EFR Performance Counter support. This code provides access to
2711 * the IOMMU performance counter (PC) functionality.
2713 ****************************************************************************/
2715 u8 amd_iommu_pc_get_max_banks(u16 devid)
2717 struct amd_iommu *iommu;
2720 /* locate the iommu governing the devid */
2721 iommu = amd_iommu_rlookup_table[devid];
2723 ret = iommu->max_banks;
2727 EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
2729 bool amd_iommu_pc_supported(void)
2731 return amd_iommu_pc_present;
2733 EXPORT_SYMBOL(amd_iommu_pc_supported);
2735 u8 amd_iommu_pc_get_max_counters(u16 devid)
2737 struct amd_iommu *iommu;
2740 /* locate the iommu governing the devid */
2741 iommu = amd_iommu_rlookup_table[devid];
2743 ret = iommu->max_counters;
2747 EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
2749 static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
2750 u8 bank, u8 cntr, u8 fxn,
2751 u64 *value, bool is_write)
2756 /* Check for valid iommu and pc register indexing */
2757 if (WARN_ON((fxn > 0x28) || (fxn & 7)))
2760 offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
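/*
 * Worked example: bank 0, counter 1, function 0x08 selects MMIO offset
 * ((0x40 | 0) << 12) | (1 << 8) | 0x08 == 0x40108.
 */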
2762 /* Limit the offset to the hw defined mmio region aperture */
2763 max_offset_lim = (u32)(((0x40|iommu->max_banks) << 12) |
2764 (iommu->max_counters << 8) | 0x28);
2765 if ((offset < MMIO_CNTR_REG_OFFSET) ||
2766 (offset > max_offset_lim))
2770 writel((u32)*value, iommu->mmio_base + offset);
2771 writel((*value >> 32), iommu->mmio_base + offset + 4);
2773 *value = readl(iommu->mmio_base + offset + 4);
2775 *value = readl(iommu->mmio_base + offset);
2780 EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
2782 int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
2783 u64 *value, bool is_write)
2785 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
2787 /* Make sure the IOMMU PC resource is available */
2788 if (!amd_iommu_pc_present || iommu == NULL)
2791 return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,