iommu/arm-smmu: Skip the execution of CMD_PREFETCH_CONFIG
author Zhen Lei <thunder.leizhen@huawei.com>
Tue, 7 Jul 2015 03:30:18 +0000 (04:30 +0100)
committer Will Deacon <will.deacon@arm.com>
Wed, 8 Jul 2015 16:24:39 +0000 (17:24 +0100)
Hisilicon SMMUv3 devices treat CMD_PREFETCH_CONFIG as an illegal
command; executing it triggers a GERROR interrupt. Although the gerror
handling code manages to turn the prefetch into a SYNC and the system
can continue to run normally, printing the resulting error information
is ugly.

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
[will: extended binding documentation]
Signed-off-by: Will Deacon <will.deacon@arm.com>
Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
drivers/iommu/arm-smmu-v3.c

index c03eec1168721bdb1c851f7c42eba34ec48f4ba7..3443e0f838dfc8a53e548527a05cf9892f2c2a92 100644
@@ -35,3 +35,6 @@ the PCIe specification.
 
                       NOTE: this only applies to the SMMU itself, not
                       masters connected upstream of the SMMU.
+
+- hisilicon,broken-prefetch-cmd
+                    : Avoid sending CMD_PREFETCH_* commands to the SMMU.
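As a rough illustration of the new binding (not part of this patch; the
node name and reg value below are placeholders), the quirk property is
simply added to the SMMUv3 device node:

	smmu@2b400000 {
		compatible = "arm,smmu-v3";
		reg = <0x0 0x2b400000 0x0 0x20000>;
		/* Implementation quirk: never issue CMD_PREFETCH_* */
		hisilicon,broken-prefetch-cmd;
	};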
index 29cba3280af7ff20ddd3387f457f10476c6d8713..da902baaa7946aac569b7ebe8a316c647dfd8187 100644
@@ -543,6 +543,9 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_HYP              (1 << 12)
        u32                             features;
 
+#define ARM_SMMU_OPT_SKIP_PREFETCH     (1 << 0)
+       u32                             options;
+
        struct arm_smmu_cmdq            cmdq;
        struct arm_smmu_evtq            evtq;
        struct arm_smmu_priq            priq;
@@ -603,11 +606,35 @@ struct arm_smmu_domain {
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
 static LIST_HEAD(arm_smmu_devices);
 
+struct arm_smmu_option_prop {
+       u32 opt;
+       const char *prop;
+};
+
+static struct arm_smmu_option_prop arm_smmu_options[] = {
+       { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
+       { 0, NULL},
+};
+
 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {
        return container_of(dom, struct arm_smmu_domain, domain);
 }
 
+static void parse_driver_options(struct arm_smmu_device *smmu)
+{
+       int i = 0;
+
+       do {
+               if (of_property_read_bool(smmu->dev->of_node,
+                                               arm_smmu_options[i].prop)) {
+                       smmu->options |= arm_smmu_options[i].opt;
+                       dev_notice(smmu->dev, "option %s\n",
+                               arm_smmu_options[i].prop);
+               }
+       } while (arm_smmu_options[++i].opt);
+}
+
 /* Low-level queue manipulation functions */
 static bool queue_full(struct arm_smmu_queue *q)
 {
@@ -1037,7 +1064,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
        arm_smmu_sync_ste_for_sid(smmu, sid);
 
        /* It's likely that we'll want to use the new STE soon */
-       arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+       if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
+               arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
 }
 
 static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
@@ -2575,6 +2603,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
        if (irq > 0)
                smmu->gerr_irq = irq;
 
+       parse_driver_options(smmu);
+
        /* Probe the h/w */
        ret = arm_smmu_device_probe(smmu);
        if (ret)