return 0;
}
-static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
+static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_ent *ent)
{
struct arm_smmu_cmdq *cmdq = NULL;
if (smmu->impl_ops && smmu->impl_ops->get_secondary_cmdq)
- cmdq = smmu->impl_ops->get_secondary_cmdq(smmu);
+ cmdq = smmu->impl_ops->get_secondary_cmdq(smmu, ent);
return cmdq ?: &smmu->cmdq;
}
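
The new ent argument only pays off if a secondary queue can veto individual commands. The header side of that is not shown in this excerpt, but the call sites below (arm_smmu_cmdq_supports_cmd() in tegra241_cmdqv_get_cmdq(), and the supports_cmd assignment during VCMDQ init) imply roughly the following shape in arm-smmu-v3.h; a sketch of the assumed interface, not the verbatim patch:

	struct arm_smmu_cmdq {
		/* ... existing fields ... */
		/* NULL means the queue accepts all commands */
		bool (*supports_cmd)(struct arm_smmu_cmdq_ent *ent);
	};

	static inline bool arm_smmu_cmdq_supports_cmd(struct arm_smmu_cmdq *cmdq,
						      struct arm_smmu_cmdq_ent *ent)
	{
		return cmdq->supports_cmd ? cmdq->supports_cmd(ent) : true;
	}

The impl_ops->get_secondary_cmdq callback is likewise assumed to gain the struct arm_smmu_cmdq_ent *ent parameter, to match the call above.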
}
return arm_smmu_cmdq_issue_cmdlist(
- smmu, arm_smmu_get_cmdq(smmu), cmd, 1, sync);
+ smmu, arm_smmu_get_cmdq(smmu, ent), cmd, 1, sync);
}
static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
}
static void arm_smmu_cmdq_batch_init(struct arm_smmu_device *smmu,
- struct arm_smmu_cmdq_batch *cmds)
+ struct arm_smmu_cmdq_batch *cmds,
+ struct arm_smmu_cmdq_ent *ent)
{
cmds->num = 0;
- cmds->cmdq = arm_smmu_get_cmdq(smmu);
+ cmds->cmdq = arm_smmu_get_cmdq(smmu, ent);
}
static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
if (force_sync || unsupported_cmd) {
arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
cmds->num, true);
- arm_smmu_cmdq_batch_init(smmu, cmds);
+ arm_smmu_cmdq_batch_init(smmu, cmds, cmd);
}
if (cmds->num == CMDQ_BATCH_ENTRIES) {
arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
cmds->num, false);
- arm_smmu_cmdq_batch_init(smmu, cmds);
+ arm_smmu_cmdq_batch_init(smmu, cmds, cmd);
}
index = cmds->num * CMDQ_ENT_DWORDS;
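
The unsupported_cmd flag tested above is not declared in this excerpt; presumably it is derived from the same hook, along the lines of:

	bool unsupported_cmd = !arm_smmu_cmdq_supports_cmd(cmds->cmdq, cmd);

so that a batch whose queue cannot take the incoming command is flushed, then re-initialized against that command via the arm_smmu_cmdq_batch_init(smmu, cmds, cmd) call above.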
},
};
- arm_smmu_cmdq_batch_init(smmu, &cmds);
+ arm_smmu_cmdq_batch_init(smmu, &cmds, &cmd);
for (i = 0; i < master->num_streams; i++) {
cmd.cfgi.sid = master->streams[i].id;
arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
arm_smmu_atc_inv_to_cmd(ssid, 0, 0, &cmd);
- arm_smmu_cmdq_batch_init(master->smmu, &cmds);
+ arm_smmu_cmdq_batch_init(master->smmu, &cmds, &cmd);
for (i = 0; i < master->num_streams; i++) {
cmd.atc.sid = master->streams[i].id;
arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
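
Note the ordering in both hunks above: the command, including its opcode, is fully built before arm_smmu_cmdq_batch_init() runs, so the queue selection in arm_smmu_get_cmdq() always sees a well-formed entry.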
struct arm_smmu_master_domain *master_domain;
int i;
unsigned long flags;
- struct arm_smmu_cmdq_ent cmd;
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = CMDQ_OP_ATC_INV,
+ };
struct arm_smmu_cmdq_batch cmds;
if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
if (!atomic_read(&smmu_domain->nr_ats_masters))
return 0;
- arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds);
+ arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds, &cmd);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master_domain, &smmu_domain->devices,
num_pages++;
}
- arm_smmu_cmdq_batch_init(smmu, &cmds);
+ arm_smmu_cmdq_batch_init(smmu, &cmds, cmd);
while (iova < end) {
if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
* struct tegra241_vintf - Virtual Interface
* @idx: Global index in the CMDQV
* @enabled: Enable status
+ * @hyp_own: Owned by hypervisor (in-kernel)
* @cmdqv: Parent CMDQV pointer
* @lvcmdqs: List of logical VCMDQ pointers
* @base: MMIO base address
u16 idx;
bool enabled;
+ bool hyp_own;
struct tegra241_cmdqv *cmdqv;
struct tegra241_vcmdq **lvcmdqs;
/* Command Queue Function */
+static bool tegra241_guest_vcmdq_supports_cmd(struct arm_smmu_cmdq_ent *ent)
+{
+ switch (ent->opcode) {
+ case CMDQ_OP_TLBI_NH_ASID:
+ case CMDQ_OP_TLBI_NH_VA:
+ case CMDQ_OP_ATC_INV:
+ return true;
+ default:
+ return false;
+ }
+}
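
Any opcode outside this allowlist (e.g. the CFGI command assembled in the arm_smmu_sync_cd() hunk above) returns false, which makes tegra241_cmdqv_get_cmdq() below return NULL and routes the command back to the main smmu->cmdq.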
+
static struct arm_smmu_cmdq *
-tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu)
+tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_ent *ent)
{
struct tegra241_cmdqv *cmdqv =
container_of(smmu, struct tegra241_cmdqv, smmu);
vcmdq = vintf->lvcmdqs[lidx];
if (!vcmdq || !READ_ONCE(vcmdq->enabled))
return NULL;
+
+ /* Unsupported CMDs go through the standard smmu->cmdq pathway */
+ if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent))
+ return NULL;
return &vcmdq->cmdq;
}
tegra241_vintf_hw_deinit(vintf);
/* Configure and enable VINTF */
+ /*
+ * Note that the HYP_OWN bit is wired to zero when running in a guest
+ * kernel, regardless of whether it is set here, since !HYP_OWN cmdq HW
+ * only supports a restricted set of commands.
+ */
regval = FIELD_PREP(VINTF_HYP_OWN, hyp_own);
writel(regval, REG_VINTF(vintf, CONFIG));
ret = vintf_write_config(vintf, regval | VINTF_EN);
if (ret)
return ret;
+ /*
+ * As mentioned above, the HYP_OWN bit is wired to zero for a guest
+ * kernel, so read it back from HW to make sure hyp_own reflects the
+ * value the HW actually reports.
+ */
+ vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG)));
for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) {
if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
q->q_base = q->base_dma & VCMDQ_ADDR;
q->q_base |= FIELD_PREP(VCMDQ_LOG2SIZE, q->llq.max_n_shift);
+ if (!vcmdq->vintf->hyp_own)
+ cmdq->supports_cmd = tegra241_guest_vcmdq_supports_cmd;
+
return arm_smmu_cmdq_init(smmu, cmdq);
}
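
Only a guest-owned (!hyp_own) VINTF gets the filter installed here; a hypervisor-owned VINTF leaves supports_cmd NULL, in which case the assumed arm_smmu_cmdq_supports_cmd() helper sketched near the top reports every command as supported and the VCMDQ keeps handling the full command set.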