iommu/amd: Don't use dev_data in irte_ga_set_affinity()
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index fba4ec168bd5f1228cf72ef8019aecdc92689951..d666246ac30b7f4d23a019082123ed69cd18fb13 100644
@@ -617,7 +617,9 @@ retry:
                       address, flags);
                break;
        default:
-               printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
+               printk(KERN_ERR "UNKNOWN type=0x%02x event[0]=0x%08x "
+                      "event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
+                      type, event[0], event[1], event[2], event[3]);
        }
 
        memset(__evt, 0, 4 * sizeof(u32));
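Note: the hunk above extends the fallback path for unrecognized event-log entries so that all four 32-bit words of the entry are printed, not just the type, which makes unknown events decodable by hand. As an aside (not part of the patch), the same raw words could also be emitted with the kernel's generic hexdump helper; a minimal sketch:

	/* Alternative sketch, not in the patch: dump the raw 16-byte
	 * event-log entry in a single call instead of an explicit
	 * format string. */
	print_hex_dump(KERN_ERR, "AMD-Vi: UNKNOWN event: ", DUMP_PREFIX_OFFSET,
		       16, 4, event, 4 * sizeof(u32), false);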
@@ -1054,9 +1056,9 @@ static int iommu_queue_command_sync(struct amd_iommu *iommu,
        unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&iommu->lock, flags);
+       raw_spin_lock_irqsave(&iommu->lock, flags);
        ret = __iommu_queue_command_sync(iommu, cmd, sync);
-       spin_unlock_irqrestore(&iommu->lock, flags);
+       raw_spin_unlock_irqrestore(&iommu->lock, flags);
 
        return ret;
 }
@@ -1082,7 +1084,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 
        build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
 
-       spin_lock_irqsave(&iommu->lock, flags);
+       raw_spin_lock_irqsave(&iommu->lock, flags);
 
        iommu->cmd_sem = 0;
 
@@ -1093,7 +1095,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
        ret = wait_on_sem(&iommu->cmd_sem);
 
 out_unlock:
-       spin_unlock_irqrestore(&iommu->lock, flags);
+       raw_spin_unlock_irqrestore(&iommu->lock, flags);
 
        return ret;
 }
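Note: the two hunks above (and the irq-table hunks further down) switch the IOMMU's locks from spinlock_t to raw_spinlock_t; the matching type changes live in the driver headers, outside this file. The usual motivation is PREEMPT_RT, where ordinary spinlocks become sleeping locks and therefore cannot be taken on the atomic paths that queue IOMMU commands or rewrite interrupt-remapping entries. A minimal sketch of the raw-spinlock pattern, using a hypothetical lock name (example_lock and example_atomic_path are not driver symbols):

	#include <linux/spinlock.h>

	/* Hypothetical example lock; the patch converts iommu->lock and
	 * irq_remap_table->lock in the same way. */
	static DEFINE_RAW_SPINLOCK(example_lock);

	static void example_atomic_path(void)
	{
		unsigned long flags;

		/* raw_spin_lock_irqsave() never sleeps, even on PREEMPT_RT,
		 * so it is safe from hard-IRQ and other atomic contexts. */
		raw_spin_lock_irqsave(&example_lock, flags);
		/* ... short, bounded critical section ... */
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}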
@@ -1816,7 +1818,8 @@ static bool dma_ops_domain(struct protection_domain *domain)
        return domain->flags & PD_DMA_OPS_MASK;
 }
 
-static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
+static void set_dte_entry(u16 devid, struct protection_domain *domain,
+                         bool ats, bool ppr)
 {
        u64 pte_root = 0;
        u64 flags = 0;
@@ -1833,6 +1836,13 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
        if (ats)
                flags |= DTE_FLAG_IOTLB;
 
+       if (ppr) {
+               struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+               if (iommu_feature(iommu, FEATURE_EPHSUP))
+                       pte_root |= 1ULL << DEV_ENTRY_PPR;
+       }
+
        if (domain->flags & PD_IOMMUV2_MASK) {
                u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
                u64 glx  = domain->glx;
@@ -1895,9 +1905,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
        domain->dev_cnt                 += 1;
 
        /* Update device table */
-       set_dte_entry(dev_data->devid, domain, ats);
+       set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2);
        if (alias != dev_data->devid)
-               set_dte_entry(alias, domain, ats);
+               set_dte_entry(alias, domain, ats, dev_data->iommu_v2);
 
        device_flush_dte(dev_data);
 }
@@ -2276,13 +2286,15 @@ static void update_device_table(struct protection_domain *domain)
        struct iommu_dev_data *dev_data;
 
        list_for_each_entry(dev_data, &domain->dev_list, list) {
-               set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
+               set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled,
+                             dev_data->iommu_v2);
 
                if (dev_data->devid == dev_data->alias)
                        continue;
 
                /* There is an alias, update device table entry for it */
-               set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled);
+               set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled,
+                             dev_data->iommu_v2);
        }
 }
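Note: set_dte_entry() now takes a ppr flag, and both call sites (do_attach() above and update_device_table() here) pass dev_data->iommu_v2, so devices on the v2 (PPR/ATS) path get the PPR-enable bit in their device-table entry, but only when the IOMMU reports the EPHSUP (enhanced PPR handling) feature. A hedged sketch of that check in isolation; dte_with_ppr() is a hypothetical helper, while iommu_feature(), FEATURE_EPHSUP and DEV_ENTRY_PPR are the driver's existing names:

	/* Hypothetical helper illustrating the new ppr handling: set the
	 * PPR-enable bit in the DTE only when the IOMMU supports enhanced
	 * PPR handling (EPHSUP); otherwise leave pte_root unchanged. */
	static u64 dte_with_ppr(struct amd_iommu *iommu, u64 pte_root)
	{
		if (iommu_feature(iommu, FEATURE_EPHSUP))
			pte_root |= 1ULL << DEV_ENTRY_PPR;

		return pte_root;
	}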
 
@@ -3615,7 +3627,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
                goto out_unlock;
 
        /* Initialize table spin-lock */
-       spin_lock_init(&table->lock);
+       raw_spin_lock_init(&table->lock);
 
        if (ioapic)
                /* Keep the first 32 indexes free for IOAPIC interrupts */
@@ -3677,7 +3689,7 @@ static int alloc_irq_index(u16 devid, int count, bool align)
        if (align)
                alignment = roundup_pow_of_two(count);
 
-       spin_lock_irqsave(&table->lock, flags);
+       raw_spin_lock_irqsave(&table->lock, flags);
 
        /* Scan table for free entries */
        for (index = ALIGN(table->min_index, alignment), c = 0;
@@ -3704,7 +3716,7 @@ static int alloc_irq_index(u16 devid, int count, bool align)
        index = -ENOSPC;
 
 out:
-       spin_unlock_irqrestore(&table->lock, flags);
+       raw_spin_unlock_irqrestore(&table->lock, flags);
 
        return index;
 }
@@ -3725,7 +3737,7 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
        if (!table)
                return -ENOMEM;
 
-       spin_lock_irqsave(&table->lock, flags);
+       raw_spin_lock_irqsave(&table->lock, flags);
 
        entry = (struct irte_ga *)table->table;
        entry = &entry[index];
@@ -3736,7 +3748,7 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
        if (data)
                data->ref = entry;
 
-       spin_unlock_irqrestore(&table->lock, flags);
+       raw_spin_unlock_irqrestore(&table->lock, flags);
 
        iommu_flush_irt(iommu, devid);
        iommu_completion_wait(iommu);
@@ -3758,9 +3770,9 @@ static int modify_irte(u16 devid, int index, union irte *irte)
        if (!table)
                return -ENOMEM;
 
-       spin_lock_irqsave(&table->lock, flags);
+       raw_spin_lock_irqsave(&table->lock, flags);
        table->table[index] = irte->val;
-       spin_unlock_irqrestore(&table->lock, flags);
+       raw_spin_unlock_irqrestore(&table->lock, flags);
 
        iommu_flush_irt(iommu, devid);
        iommu_completion_wait(iommu);
@@ -3782,9 +3794,9 @@ static void free_irte(u16 devid, int index)
        if (!table)
                return;
 
-       spin_lock_irqsave(&table->lock, flags);
+       raw_spin_lock_irqsave(&table->lock, flags);
        iommu->irte_ops->clear_allocated(table, index);
-       spin_unlock_irqrestore(&table->lock, flags);
+       raw_spin_unlock_irqrestore(&table->lock, flags);
 
        iommu_flush_irt(iommu, devid);
        iommu_completion_wait(iommu);
@@ -3865,10 +3877,8 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
                                 u8 vector, u32 dest_apicid)
 {
        struct irte_ga *irte = (struct irte_ga *) entry;
-       struct iommu_dev_data *dev_data = search_dev_data(devid);
 
-       if (!dev_data || !dev_data->use_vapic ||
-           !irte->lo.fields_remap.guest_mode) {
+       if (!irte->lo.fields_remap.guest_mode) {
                irte->hi.fields.vector = vector;
                irte->lo.fields_remap.destination = dest_apicid;
                modify_irte_ga(devid, index, irte, NULL);
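Note: with the dev_data lookup dropped, the function only inspects the IRTE itself: entries not in guest (vAPIC) mode get their vector and destination updated and are written back through modify_irte_ga(); guest-mode entries are left to the vAPIC path. For reference, a sketch of the whole function as it reads after this change, reconstructed from the hunk and its context lines (the closing braces are implied by the diff):

	static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
					 u8 vector, u32 dest_apicid)
	{
		struct irte_ga *irte = (struct irte_ga *) entry;

		if (!irte->lo.fields_remap.guest_mode) {
			irte->hi.fields.vector = vector;
			irte->lo.fields_remap.destination = dest_apicid;
			modify_irte_ga(devid, index, irte, NULL);
		}
	}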
@@ -4385,7 +4395,7 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
        if (!irt)
                return -ENODEV;
 
-       spin_lock_irqsave(&irt->lock, flags);
+       raw_spin_lock_irqsave(&irt->lock, flags);
 
        if (ref->lo.fields_vapic.guest_mode) {
                if (cpu >= 0)
@@ -4394,7 +4404,7 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
                barrier();
        }
 
-       spin_unlock_irqrestore(&irt->lock, flags);
+       raw_spin_unlock_irqrestore(&irt->lock, flags);
 
        iommu_flush_irt(iommu, devid);
        iommu_completion_wait(iommu);
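Note: amd_iommu_update_ga() is the helper the KVM AVIC code uses to point guest-mode IRTEs at the physical CPU a vCPU currently runs on; this hunk only changes its internal locking. A hypothetical caller sketch, shown solely to illustrate the calling convention visible in the signature above (example_vcpu_loaded and ir_data are illustrative names; the real caller lives in the KVM SVM code and passes its saved per-interrupt data):

	/* Hypothetical caller sketch: redirect guest-mode interrupts for a
	 * vCPU that was just scheduled onto @cpu. ir_data is the per-IRQ
	 * cookie the IOMMU handed back when the interrupt was set up. */
	static void example_vcpu_loaded(int cpu, void *ir_data)
	{
		int ret;

		ret = amd_iommu_update_ga(cpu, true, ir_data);
		if (ret)
			pr_warn("amd_iommu_update_ga failed: %d\n", ret);
	}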