iommu/tegra241-cmdqv: Limit CMDs for VCMDQs of a guest owned VINTF
author    Nicolin Chen <nicolinc@nvidia.com>
          Thu, 29 Aug 2024 22:34:39 +0000 (15:34 -0700)
committer Will Deacon <will@kernel.org>
          Fri, 30 Aug 2024 14:28:25 +0000 (15:28 +0100)
When VCMDQs are assigned to a VINTF owned by a guest (HYP_OWN bit unset),
the VCMDQ HW supports only TLB and ATC invalidation commands. So, implement
the new cmdq->supports_cmd op to scan each input command and make sure it
is supported by the selected queue.
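
For reference, arm_smmu_cmdq_supports_cmd() as consulted in the diff below
reduces to a NULL-check-then-call. A minimal sketch, assuming the helper in
arm-smmu-v3.h keeps this shape:

    static inline bool arm_smmu_cmdq_supports_cmd(struct arm_smmu_cmdq *cmdq,
                                                  struct arm_smmu_cmdq_ent *ent)
    {
            /* A queue that installs no filter accepts every command */
            return cmdq->supports_cmd ? cmdq->supports_cmd(ent) : true;
    }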

Note that a guest VM should never see the HYP_OWN bit set, regardless of
whether its kernel driver writes it, i.e. the hypervisor running in the
host OS should wire this bit to zero when trapping a write access to the
VINTF_CONFIG register from a guest kernel.
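
As an illustration only, such a trap could look like the hypothetical
VMM-side handler below; the function name and register-offset macro are
made up for this sketch (only VINTF_HYP_OWN matches the driver):

    /* Hypothetical VMM handler, not a real API: mask HYP_OWN on guest writes */
    static void vmm_trap_vintf_config_write(void __iomem *vintf_base, u32 val)
    {
            /* A guest must never own its VINTF: wire HYP_OWN to zero */
            val &= ~VINTF_HYP_OWN;
            writel(val, vintf_base + VINTF_CONFIG_OFFSET);
    }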

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Link: https://lore.kernel.org/r/8160292337059b91271045800e5c62f7295e2c24.1724970714.git.nicolinc@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 1e67fd4fbcf32250202ea3e757b29a7a15ffc9d6..0c28e2b5b723ba6a5fcd6db077d0a5826602ef78 100644
@@ -346,12 +346,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
        return 0;
 }
 
-static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
+static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu,
+                                              struct arm_smmu_cmdq_ent *ent)
 {
        struct arm_smmu_cmdq *cmdq = NULL;
 
        if (smmu->impl_ops && smmu->impl_ops->get_secondary_cmdq)
-               cmdq = smmu->impl_ops->get_secondary_cmdq(smmu);
+               cmdq = smmu->impl_ops->get_secondary_cmdq(smmu, ent);
 
        return cmdq ?: &smmu->cmdq;
 }
@@ -897,7 +898,7 @@ static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
        }
 
        return arm_smmu_cmdq_issue_cmdlist(
-               smmu, arm_smmu_get_cmdq(smmu), cmd, 1, sync);
+               smmu, arm_smmu_get_cmdq(smmu, ent), cmd, 1, sync);
 }
 
 static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
@@ -913,10 +914,11 @@ static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu,
 }
 
 static void arm_smmu_cmdq_batch_init(struct arm_smmu_device *smmu,
-                                    struct arm_smmu_cmdq_batch *cmds)
+                                    struct arm_smmu_cmdq_batch *cmds,
+                                    struct arm_smmu_cmdq_ent *ent)
 {
        cmds->num = 0;
-       cmds->cmdq = arm_smmu_get_cmdq(smmu);
+       cmds->cmdq = arm_smmu_get_cmdq(smmu, ent);
 }
 
 static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
@@ -931,13 +933,13 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
        if (force_sync || unsupported_cmd) {
                arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
                                            cmds->num, true);
-               arm_smmu_cmdq_batch_init(smmu, cmds);
+               arm_smmu_cmdq_batch_init(smmu, cmds, cmd);
        }
 
        if (cmds->num == CMDQ_BATCH_ENTRIES) {
                arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
                                            cmds->num, false);
-               arm_smmu_cmdq_batch_init(smmu, cmds);
+               arm_smmu_cmdq_batch_init(smmu, cmds, cmd);
        }
 
        index = cmds->num * CMDQ_ENT_DWORDS;
@@ -1205,7 +1207,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_master *master,
                },
        };
 
-       arm_smmu_cmdq_batch_init(smmu, &cmds);
+       arm_smmu_cmdq_batch_init(smmu, &cmds, &cmd);
        for (i = 0; i < master->num_streams; i++) {
                cmd.cfgi.sid = master->streams[i].id;
                arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
@@ -2056,7 +2058,7 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
 
        arm_smmu_atc_inv_to_cmd(ssid, 0, 0, &cmd);
 
-       arm_smmu_cmdq_batch_init(master->smmu, &cmds);
+       arm_smmu_cmdq_batch_init(master->smmu, &cmds, &cmd);
        for (i = 0; i < master->num_streams; i++) {
                cmd.atc.sid = master->streams[i].id;
                arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
@@ -2071,7 +2073,9 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
        struct arm_smmu_master_domain *master_domain;
        int i;
        unsigned long flags;
-       struct arm_smmu_cmdq_ent cmd;
+       struct arm_smmu_cmdq_ent cmd = {
+               .opcode = CMDQ_OP_ATC_INV,
+       };
        struct arm_smmu_cmdq_batch cmds;
 
        if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
@@ -2094,7 +2098,7 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
        if (!atomic_read(&smmu_domain->nr_ats_masters))
                return 0;
 
-       arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds);
+       arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds, &cmd);
 
        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
        list_for_each_entry(master_domain, &smmu_domain->devices,
@@ -2176,7 +2180,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
                        num_pages++;
        }
 
-       arm_smmu_cmdq_batch_init(smmu, &cmds);
+       arm_smmu_cmdq_batch_init(smmu, &cmds, cmd);
 
        while (iova < end) {
                if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 4deb40cfe2e18d948af2d5bb5206b8a07a87ebbe..4d5af5ac8a630eb2c3283af7aa45f214823e33cc 100644
@@ -642,7 +642,8 @@ struct arm_smmu_strtab_cfg {
 struct arm_smmu_impl_ops {
        int (*device_reset)(struct arm_smmu_device *smmu);
        void (*device_remove)(struct arm_smmu_device *smmu);
-       struct arm_smmu_cmdq *(*get_secondary_cmdq)(struct arm_smmu_device *smmu);
+       struct arm_smmu_cmdq *(*get_secondary_cmdq)(
+               struct arm_smmu_device *smmu, struct arm_smmu_cmdq_ent *ent);
 };
 
 /* An SMMUv3 instance */
diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
index 5ac3032ee6dd2078d6e9082d002a7c9aa542bd37..9eb9d959f3e5d36fd086fdf2d27f7650610cd6fb 100644
@@ -142,6 +142,7 @@ struct tegra241_vcmdq {
  * struct tegra241_vintf - Virtual Interface
  * @idx: Global index in the CMDQV
  * @enabled: Enable status
+ * @hyp_own: Owned by hypervisor (in-kernel)
  * @cmdqv: Parent CMDQV pointer
  * @lvcmdqs: List of logical VCMDQ pointers
  * @base: MMIO base address
@@ -150,6 +151,7 @@ struct tegra241_vintf {
        u16 idx;
 
        bool enabled;
+       bool hyp_own;
 
        struct tegra241_cmdqv *cmdqv;
        struct tegra241_vcmdq **lvcmdqs;
@@ -301,8 +303,21 @@ static irqreturn_t tegra241_cmdqv_isr(int irq, void *devid)
 
 /* Command Queue Function */
 
+static bool tegra241_guest_vcmdq_supports_cmd(struct arm_smmu_cmdq_ent *ent)
+{
+       switch (ent->opcode) {
+       case CMDQ_OP_TLBI_NH_ASID:
+       case CMDQ_OP_TLBI_NH_VA:
+       case CMDQ_OP_ATC_INV:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static struct arm_smmu_cmdq *
-tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu)
+tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
+                       struct arm_smmu_cmdq_ent *ent)
 {
        struct tegra241_cmdqv *cmdqv =
                container_of(smmu, struct tegra241_cmdqv, smmu);
@@ -328,6 +343,10 @@ tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu)
        vcmdq = vintf->lvcmdqs[lidx];
        if (!vcmdq || !READ_ONCE(vcmdq->enabled))
                return NULL;
+
+       /* Unsupported CMDs fall back to the default smmu->cmdq pathway */
+       if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent))
+               return NULL;
        return &vcmdq->cmdq;
 }
 
@@ -406,12 +425,22 @@ static int tegra241_vintf_hw_init(struct tegra241_vintf *vintf, bool hyp_own)
        tegra241_vintf_hw_deinit(vintf);
 
        /* Configure and enable VINTF */
+       /*
+        * Note that the HYP_OWN bit is wired to zero when running in a guest
+        * kernel, regardless of whether it is set here, and that a !HYP_OWN
+        * cmdq HW only supports a restricted set of commands.
+        */
        regval = FIELD_PREP(VINTF_HYP_OWN, hyp_own);
        writel(regval, REG_VINTF(vintf, CONFIG));
 
        ret = vintf_write_config(vintf, regval | VINTF_EN);
        if (ret)
                return ret;
+       /*
+        * As mentioned above, the HYP_OWN bit is wired to zero for a guest
+        * kernel, so read it back from the HW to keep hyp_own accurate.
+        */
+       vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG)));
 
        for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) {
                if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
@@ -493,6 +522,9 @@ static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
        q->q_base = q->base_dma & VCMDQ_ADDR;
        q->q_base |= FIELD_PREP(VCMDQ_LOG2SIZE, q->llq.max_n_shift);
 
+       if (!vcmdq->vintf->hyp_own)
+               cmdq->supports_cmd = tegra241_guest_vcmdq_supports_cmd;
+
        return arm_smmu_cmdq_init(smmu, cmdq);
 }