diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index f93b148cf55e78320392d40656b02159d3597e0a..1ed3b98324bac09ce8399b2ee0ad445e08802d20 100644
@@ -436,7 +436,7 @@ static int iommu_init_device(struct device *dev)
         * invalid address), we ignore the capability for the device so
         * it'll be forced to go into translation mode.
         */
-       if ((iommu_pass_through || !amd_iommu_force_isolation) &&
+       if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
            dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
                struct amd_iommu *iommu;
 
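Note on the hunk above: the driver-local iommu_pass_through flag gives way to the core iommu_default_passthrough() helper, so the pass-through decision is keyed off the IOMMU core's default domain type rather than a per-driver boot flag. A minimal, self-contained sketch of that shape (toy names only, not the real helper from include/linux/iommu.h):

    #include <stdbool.h>

    /* Toy stand-in for the core's default domain type (assumed names). */
    enum toy_domain_type { TOY_DOMAIN_DMA, TOY_DOMAIN_IDENTITY };

    static enum toy_domain_type toy_def_domain_type = TOY_DOMAIN_DMA;

    /* Drivers ask the core whether the default is identity/pass-through
     * instead of consulting their own global flag. */
    static bool toy_default_passthrough(void)
    {
            return toy_def_domain_type == TOY_DOMAIN_IDENTITY;
    }
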
@@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
        iommu_completion_wait(iommu);
 }
 
+static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
+{
+       struct iommu_cmd cmd;
+
+       build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+                             dom_id, 1);
+       iommu_queue_command(iommu, &cmd);
+
+       iommu_completion_wait(iommu);
+}
+
 static void amd_iommu_flush_all(struct amd_iommu *iommu)
 {
        struct iommu_cmd cmd;
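The new amd_iommu_flush_tlb_domid() helper above invalidates every cached translation tagged with one domain ID, in contrast to amd_iommu_flush_all(), which drops everything. A self-contained toy sketch of that distinction (illustrative only, not kernel code):

    #include <stdint.h>
    #include <stddef.h>

    /* Toy IOTLB: each entry is tagged with the domain ID it belongs to. */
    struct toy_tlb_entry {
            uint32_t domid;
            int      valid;
    };

    /* Domain-scoped flush: only entries carrying 'domid' are dropped,
     * translations cached for other domains stay warm. */
    static void toy_flush_tlb_domid(struct toy_tlb_entry *tlb, size_t n,
                                    uint32_t domid)
    {
            for (size_t i = 0; i < n; i++)
                    if (tlb[i].valid && tlb[i].domid == domid)
                            tlb[i].valid = 0;
    }
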
@@ -1424,18 +1435,21 @@ static void free_pagetable(struct protection_domain *domain)
  * another level increases the size of the address space by 9 bits to a size up
  * to 64 bits.
  */
-static bool increase_address_space(struct protection_domain *domain,
+static void increase_address_space(struct protection_domain *domain,
                                   gfp_t gfp)
 {
+       unsigned long flags;
        u64 *pte;
 
-       if (domain->mode == PAGE_MODE_6_LEVEL)
+       spin_lock_irqsave(&domain->lock, flags);
+
+       if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
                /* address space already 64 bit large */
-               return false;
+               goto out;
 
        pte = (void *)get_zeroed_page(gfp);
        if (!pte)
-               return false;
+               goto out;
 
        *pte             = PM_LEVEL_PDE(domain->mode,
                                        iommu_virt_to_phys(domain->pt_root));
@@ -1443,7 +1457,10 @@ static bool increase_address_space(struct protection_domain *domain,
        domain->mode    += 1;
        domain->updated  = true;
 
-       return true;
+out:
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return;
 }
 
 static u64 *alloc_pte(struct protection_domain *domain,
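The two hunks above turn increase_address_space() into a void function that takes domain->lock around both the level check and the page-table root swap, so concurrent mappers cannot both see a too-small address space and race to install a new top level. A self-contained userspace sketch of that check-then-grow-under-one-lock pattern (a pthread mutex stands in for the domain spinlock):

    #include <pthread.h>
    #include <stdlib.h>

    #define TOY_MAX_LEVEL 6

    struct toy_domain {
            pthread_mutex_t lock;   /* stands in for domain->lock */
            int             level;  /* stands in for domain->mode */
            void           *root;   /* stands in for domain->pt_root */
    };

    static void toy_increase_address_space(struct toy_domain *d)
    {
            void *new_root;

            pthread_mutex_lock(&d->lock);

            if (d->level == TOY_MAX_LEVEL)  /* already as large as it gets */
                    goto out;

            new_root = calloc(512, sizeof(void *));
            if (!new_root)
                    goto out;

            /* Link the old top level below the new one, then publish the
             * new root and the new level while still holding the lock. */
            ((void **)new_root)[0] = d->root;
            d->root = new_root;
            d->level++;
    out:
            pthread_mutex_unlock(&d->lock);
    }
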
@@ -1873,6 +1890,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
 {
        u64 pte_root = 0;
        u64 flags = 0;
+       u32 old_domid;
 
        if (domain->mode != PAGE_MODE_NONE)
                pte_root = iommu_virt_to_phys(domain->pt_root);
@@ -1922,8 +1940,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
        flags &= ~DEV_DOMID_MASK;
        flags |= domain->id;
 
+       old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
        amd_iommu_dev_table[devid].data[1]  = flags;
        amd_iommu_dev_table[devid].data[0]  = pte_root;
+
+       /*
+        * A kdump kernel might be replacing a domain ID that was copied from
+        * the previous kernel--if so, it needs to flush the translation cache
+        * entries for the old domain ID that is being overwritten
+        */
+       if (old_domid) {
+               struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+               amd_iommu_flush_tlb_domid(iommu, old_domid);
+       }
 }
 
 static void clear_dte_entry(u16 devid)
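The hunk above records the domain ID previously programmed into the device table entry and, if it was non-zero, flushes the IOTLB for that old ID once the DTE has been rewritten; in the kdump case that stale ID was inherited from the crashed kernel. A self-contained toy sketch of the read-old-tag-then-flush sequence (the 16-bit width of the domain-ID field is an assumption here, and the flush is only a stub):

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_DOMID_MASK 0xffffULL        /* assumed 16-bit domain-ID field */

    struct toy_dte { uint64_t data[4]; };

    static void toy_flush_domid(uint32_t domid)
    {
            printf("flush stale IOTLB entries for domain %u\n",
                   (unsigned int)domid);
    }

    static void toy_set_dte(struct toy_dte *dte, uint64_t pte_root,
                            uint64_t flags)
    {
            /* Remember which domain the entry pointed at before rewriting it. */
            uint32_t old_domid = dte->data[1] & TOY_DOMID_MASK;

            dte->data[1] = flags;
            dte->data[0] = pte_root;

            /* Anything the hardware cached under the old tag is now stale. */
            if (old_domid)
                    toy_flush_domid(old_domid);
    }
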
@@ -2226,7 +2256,7 @@ static int amd_iommu_add_device(struct device *dev)
 
        BUG_ON(!dev_data);
 
-       if (iommu_pass_through || dev_data->iommu_v2)
+       if (dev_data->iommu_v2)
                iommu_request_dm_for_dev(dev);
 
        /* Domains are initialized for this device - have a look what we ended up with */
@@ -2547,7 +2577,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 
                        bus_addr  = address + s->dma_address + (j << PAGE_SHIFT);
                        phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
-                       ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
+                       ret = iommu_map_page(domain, bus_addr, phys_addr,
+                                            PAGE_SIZE, prot,
+                                            GFP_ATOMIC | __GFP_NOWARN);
                        if (ret)
                                goto out_unmap;
 
@@ -2805,7 +2837,7 @@ int __init amd_iommu_init_api(void)
 
 int __init amd_iommu_init_dma_ops(void)
 {
-       swiotlb        = (iommu_pass_through || sme_me_mask) ? 1 : 0;
+       swiotlb        = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
        iommu_detected = 1;
 
        if (amd_iommu_unmap_flush)
@@ -3055,7 +3087,8 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 }
 
 static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-                          size_t page_size)
+                             size_t page_size,
+                             struct iommu_iotlb_gather *gather)
 {
        struct protection_domain *domain = to_pdomain(dom);
        size_t unmap_size;
@@ -3196,6 +3229,12 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
        domain_flush_complete(dom);
 }
 
+static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
+                                struct iommu_iotlb_gather *gather)
+{
+       amd_iommu_flush_iotlb_all(domain);
+}
+
 const struct iommu_ops amd_iommu_ops = {
        .capable = amd_iommu_capable,
        .domain_alloc = amd_iommu_domain_alloc,
@@ -3214,7 +3253,7 @@ const struct iommu_ops amd_iommu_ops = {
        .is_attach_deferred = amd_iommu_is_attach_deferred,
        .pgsize_bitmap  = AMD_IOMMU_PGSIZES,
        .flush_iotlb_all = amd_iommu_flush_iotlb_all,
-       .iotlb_sync = amd_iommu_flush_iotlb_all,
+       .iotlb_sync = amd_iommu_iotlb_sync,
 };
 
 /*****************************************************************************
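The amd_iommu_unmap() and ops-table changes above follow the core IOMMU API update that threads a struct iommu_iotlb_gather through unmap and sync: callers batch several unmaps and issue a single TLB sync afterwards. The new amd_iommu_iotlb_sync() ignores the gathered range and simply flushes the whole domain. A self-contained toy sketch of the gather-then-sync pattern (illustrative only, not the real core code):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy version of the gather cookie: it only tracks the combined range. */
    struct toy_gather {
            uint64_t start;
            uint64_t end;
    };

    /* Called once per unmapped chunk: widen the pending invalidation range. */
    static void toy_gather_add(struct toy_gather *g, uint64_t iova,
                               uint64_t size)
    {
            if (g->end == 0) {
                    g->start = iova;
                    g->end   = iova + size;
                    return;
            }
            if (iova < g->start)
                    g->start = iova;
            if (iova + size > g->end)
                    g->end = iova + size;
    }

    /* Called once after the batch: a range-capable driver would flush
     * [start, end); a driver like the one above flushes everything. */
    static void toy_iotlb_sync(struct toy_gather *g)
    {
            printf("flush all (batched range was 0x%llx-0x%llx)\n",
                   (unsigned long long)g->start, (unsigned long long)g->end);
            g->start = g->end = 0;
    }
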
@@ -4307,13 +4346,62 @@ static const struct irq_domain_ops amd_ir_domain_ops = {
        .deactivate = irq_remapping_deactivate,
 };
 
+int amd_iommu_activate_guest_mode(void *data)
+{
+       struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+       struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+
+       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+           !entry || entry->lo.fields_vapic.guest_mode)
+               return 0;
+
+       entry->lo.val = 0;
+       entry->hi.val = 0;
+
+       entry->lo.fields_vapic.guest_mode  = 1;
+       entry->lo.fields_vapic.ga_log_intr = 1;
+       entry->hi.fields.ga_root_ptr       = ir_data->ga_root_ptr;
+       entry->hi.fields.vector            = ir_data->ga_vector;
+       entry->lo.fields_vapic.ga_tag      = ir_data->ga_tag;
+
+       return modify_irte_ga(ir_data->irq_2_irte.devid,
+                             ir_data->irq_2_irte.index, entry, NULL);
+}
+EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
+
+int amd_iommu_deactivate_guest_mode(void *data)
+{
+       struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+       struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+       struct irq_cfg *cfg = ir_data->cfg;
+
+       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+           !entry || !entry->lo.fields_vapic.guest_mode)
+               return 0;
+
+       entry->lo.val = 0;
+       entry->hi.val = 0;
+
+       entry->lo.fields_remap.dm          = apic->irq_dest_mode;
+       entry->lo.fields_remap.int_type    = apic->irq_delivery_mode;
+       entry->hi.fields.vector            = cfg->vector;
+       entry->lo.fields_remap.destination =
+                               APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
+       entry->hi.fields.destination =
+                               APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
+
+       return modify_irte_ga(ir_data->irq_2_irte.devid,
+                             ir_data->irq_2_irte.index, entry, NULL);
+}
+EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
+
 static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
 {
+       int ret;
        struct amd_iommu *iommu;
        struct amd_iommu_pi_data *pi_data = vcpu_info;
        struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
        struct amd_ir_data *ir_data = data->chip_data;
-       struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
        struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
        struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
 
@@ -4324,6 +4412,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
        if (!dev_data || !dev_data->use_vapic)
                return 0;
 
+       ir_data->cfg = irqd_cfg(data);
        pi_data->ir_data = ir_data;
 
        /* Note:
@@ -4342,37 +4431,24 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
 
        pi_data->prev_ga_tag = ir_data->cached_ga_tag;
        if (pi_data->is_guest_mode) {
-               /* Setting */
-               irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
-               irte->hi.fields.vector = vcpu_pi_info->vector;
-               irte->lo.fields_vapic.ga_log_intr = 1;
-               irte->lo.fields_vapic.guest_mode = 1;
-               irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
-
-               ir_data->cached_ga_tag = pi_data->ga_tag;
+               ir_data->ga_root_ptr = (pi_data->base >> 12);
+               ir_data->ga_vector = vcpu_pi_info->vector;
+               ir_data->ga_tag = pi_data->ga_tag;
+               ret = amd_iommu_activate_guest_mode(ir_data);
+               if (!ret)
+                       ir_data->cached_ga_tag = pi_data->ga_tag;
        } else {
-               /* Un-Setting */
-               struct irq_cfg *cfg = irqd_cfg(data);
-
-               irte->hi.val = 0;
-               irte->lo.val = 0;
-               irte->hi.fields.vector = cfg->vector;
-               irte->lo.fields_remap.guest_mode = 0;
-               irte->lo.fields_remap.destination =
-                               APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
-               irte->hi.fields.destination =
-                               APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
-               irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
-               irte->lo.fields_remap.dm = apic->irq_dest_mode;
+               ret = amd_iommu_deactivate_guest_mode(ir_data);
 
                /*
                 * This communicates the ga_tag back to the caller
                 * so that it can do all the necessary clean up.
                 */
-               ir_data->cached_ga_tag = 0;
+               if (!ret)
+                       ir_data->cached_ga_tag = 0;
        }
 
-       return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
+       return ret;
 }
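
With the refactor above, amd_ir_set_vcpu_affinity() no longer pokes IRTE fields directly; it stashes the guest parameters in struct amd_ir_data and calls the two newly exported helpers. A hypothetical caller sketch, kernel-side (the wrapper name and its bool parameter are made up for illustration; only the two exported functions come from this patch):

    /* Hypothetical user of the exported helpers: switch one remapped
     * interrupt between guest (posted) and host delivery.  'ir_data' is
     * the opaque cookie the IOMMU driver handed back for this IRTE. */
    static int toy_switch_irte_mode(void *ir_data, bool run_as_guest)
    {
            if (run_as_guest)
                    return amd_iommu_activate_guest_mode(ir_data);

            return amd_iommu_deactivate_guest_mode(ir_data);
    }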