arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
-static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
-				      struct arm_smmu_ste *dst)
+static void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
{
-	/*
-	 * This is hideously complicated, but we only really care about
-	 * three cases at the moment:
-	 *
-	 * 1. Invalid (all zero) -> bypass/fault (init)
-	 * 2. Bypass/fault -> translation/bypass (attach)
-	 * 3. Translation/bypass -> bypass/fault (detach)
-	 *
-	 * Given that we can't update the STE atomically and the SMMU
-	 * doesn't read the thing in a defined order, that leaves us
-	 * with the following maintenance requirements:
-	 *
-	 * 1. Update Config, return (init time STEs aren't live)
-	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
-	 * 3. Update Config, sync
-	 */
-	u64 val = le64_to_cpu(dst->data[0]);
-	bool ste_live = false;
-	struct arm_smmu_device *smmu = master->smmu;
-	struct arm_smmu_ctx_desc_cfg *cd_table = NULL;
-	struct arm_smmu_s2_cfg *s2_cfg = NULL;
-	struct arm_smmu_domain *smmu_domain = master->domain;
-	struct arm_smmu_cmdq_ent prefetch_cmd = {
-		.opcode		= CMDQ_OP_PREFETCH_CFG,
-		.prefetch	= {
-			.sid	= sid,
-		},
-	};
-
-	if (smmu_domain) {
-		switch (smmu_domain->stage) {
-		case ARM_SMMU_DOMAIN_S1:
-			cd_table = &master->cd_table;
-			break;
-		case ARM_SMMU_DOMAIN_S2:
-			s2_cfg = &smmu_domain->s2_cfg;
-			break;
-		default:
-			break;
-		}
-	}
-
-	if (val & STRTAB_STE_0_V) {
-		switch (FIELD_GET(STRTAB_STE_0_CFG, val)) {
-		case STRTAB_STE_0_CFG_BYPASS:
-			break;
-		case STRTAB_STE_0_CFG_S1_TRANS:
-		case STRTAB_STE_0_CFG_S2_TRANS:
-			ste_live = true;
-			break;
-		case STRTAB_STE_0_CFG_ABORT:
-			BUG_ON(!disable_bypass);
-			break;
-		default:
-			BUG(); /* STE corruption */
-		}
-	}
+	memset(target, 0, sizeof(*target));
+	target->data[0] = cpu_to_le64(
+		STRTAB_STE_0_V |
+		FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
+}
-	/* Nuke the existing STE_0 value, as we're going to rewrite it */
-	val = STRTAB_STE_0_V;
+static void arm_smmu_make_bypass_ste(struct arm_smmu_ste *target)
+{
+	memset(target, 0, sizeof(*target));
+	target->data[0] = cpu_to_le64(
+		STRTAB_STE_0_V |
+		FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS));
+	target->data[1] = cpu_to_le64(
+		FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
+}
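[Reviewer note: the STE builders above lean entirely on the kernel's GENMASK()/FIELD_PREP() bitfield helpers. Below is a minimal standalone sketch of how those helpers compose STE dword 0, assuming the usual SMMUv3 encodings (V at bit 0, CFG at bits [3:1], 0 = abort, 4 = bypass); the macro definitions are simplified mimics of bits.h/bitfield.h, not the kernel's.]

#include <stdint.h>
#include <stdio.h>

/* Simplified mimics of the kernel's GENMASK_ULL()/FIELD_PREP(). */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
/* Shift the value to the mask's lowest set bit, then mask it. */
#define FIELD_PREP(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define STRTAB_STE_0_V		(1ULL << 0)
#define STRTAB_STE_0_CFG	GENMASK_ULL(3, 1)
#define STRTAB_STE_0_CFG_ABORT	0
#define STRTAB_STE_0_CFG_BYPASS	4

int main(void)
{
	uint64_t abort_ste = STRTAB_STE_0_V |
			     FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
	uint64_t bypass_ste = STRTAB_STE_0_V |
			      FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);

	printf("abort dword 0:  0x%llx\n", (unsigned long long)abort_ste);  /* 0x1 */
	printf("bypass dword 0: 0x%llx\n", (unsigned long long)bypass_ste); /* 0x9 */
	return 0;
}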
-	/* Bypass/fault */
-	if (!smmu_domain || !(cd_table || s2_cfg)) {
-		if (!smmu_domain && disable_bypass)
-			val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
-		else
-			val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
+static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
+				      struct arm_smmu_master *master)
+{
+	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
+	struct arm_smmu_device *smmu = master->smmu;
-		dst->data[0] = cpu_to_le64(val);
-		dst->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
-						STRTAB_STE_1_SHCFG_INCOMING));
-		dst->data[2] = 0; /* Nuke the VMID */
+	memset(target, 0, sizeof(*target));
+	target->data[0] = cpu_to_le64(
+		STRTAB_STE_0_V |
+		FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
+		FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt) |
+		(cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+		FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax));
+
+	target->data[1] = cpu_to_le64(
+		FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
+		FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+		FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+		FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
+		((smmu->features & ARM_SMMU_FEAT_STALLS &&
+		  !master->stall_enabled) ?
+			 STRTAB_STE_1_S1STALLD :
+			 0) |
+		FIELD_PREP(STRTAB_STE_1_EATS,
+			   master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
+
+	if (smmu->features & ARM_SMMU_FEAT_E2H) {
/*
-	 * The SMMU can perform negative caching, so we must sync
-	 * the STE regardless of whether the old value was live.
+	 * To support BTM the streamworld needs to match the
+	 * configuration of the CPU so that the ASID broadcasts are
+	 * properly matched. This means either S/NS-EL2-E2H (hypervisor)
+	 * or NS-EL1 (guest). Since an SVA domain can be installed in a
+	 * PASID this should always use a BTM compatible configuration
+	 * if the HW supports it.
*/
-		if (smmu)
-			arm_smmu_sync_ste_for_sid(smmu, sid);
-		return;
-	}
-
-	if (cd_table) {
-		u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
-			STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
-
-		BUG_ON(ste_live);
-		dst->data[1] = cpu_to_le64(
-			 FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
-			 FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
-			 FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
-			 FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
-			 FIELD_PREP(STRTAB_STE_1_STRW, strw));
-
-		if (smmu->features & ARM_SMMU_FEAT_STALLS &&
-		    !master->stall_enabled)
-			dst->data[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
-
-		val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
-			FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
-			FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax) |
-			FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt);
-	}
+		target->data[1] |= cpu_to_le64(
+			FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_EL2));
+	} else {
+		target->data[1] |= cpu_to_le64(
+			FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
-	if (s2_cfg) {
-		BUG_ON(ste_live);
-		dst->data[2] = cpu_to_le64(
-			 FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
-			 FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
+		/*
+		 * VMID 0 is reserved for stage-2 bypass EL1 STEs, see
+		 * arm_smmu_domain_alloc_id()
+		 */
+		target->data[2] =
+			cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0));
+	}
+}
+
+static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
+					struct arm_smmu_master *master,
+					struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg;
+	const struct io_pgtable_cfg *pgtbl_cfg =
+		&io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
+	typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr =
+		&pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+	u64 vtcr_val;
+
+	memset(target, 0, sizeof(*target));
+	target->data[0] = cpu_to_le64(
+		STRTAB_STE_0_V |
+		FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS));
+
+	target->data[1] = cpu_to_le64(
+		FIELD_PREP(STRTAB_STE_1_EATS,
+			   master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0) |
+		FIELD_PREP(STRTAB_STE_1_SHCFG,
+			   STRTAB_STE_1_SHCFG_INCOMING));
+
+	vtcr_val = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
+		   FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
+		   FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
+		   FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
+		   FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
+		   FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
+		   FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
+	target->data[2] = cpu_to_le64(
+		FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
+		FIELD_PREP(STRTAB_STE_2_VTCR, vtcr_val) |
+		STRTAB_STE_2_S2AA64 |
#ifdef __BIG_ENDIAN
-			 STRTAB_STE_2_S2ENDI |
+		STRTAB_STE_2_S2ENDI |
#endif
-			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
-			 STRTAB_STE_2_S2R);
-
-		dst->data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
-
-		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
-	}
-
-	if (master->ats_enabled)
-		dst->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
-						 STRTAB_STE_1_EATS_TRANS));
-
-	arm_smmu_sync_ste_for_sid(smmu, sid);
-	/* See comment in arm_smmu_write_ctx_desc() */
-	WRITE_ONCE(dst->data[0], cpu_to_le64(val));
-	arm_smmu_sync_ste_for_sid(smmu, sid);
+		STRTAB_STE_2_S2PTW |
+		STRTAB_STE_2_S2R);
-
-	/* It's likely that we'll want to use the new STE soon */
-	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
-		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+
+	target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr &
+				      STRTAB_STE_3_S2TTB_MASK);
}
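[Reviewer note: the removed arm_smmu_write_strtab_ent() encoded the update rule "write everything apart from dword 0, sync, write dword 0, sync", visible above in the dropped WRITE_ONCE() bracketed by two arm_smmu_sync_ste_for_sid() calls. Below is a minimal standalone C11 analogue of that publication order, assuming a single writer; the release/acquire pair stands in for the driver's WRITE_ONCE() + CMD_SYNC and none of these names are driver API.]

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

struct ste {
	_Atomic uint64_t data0;	/* dword 0, carries the V bit (bit 0) */
	uint64_t data[3];	/* remaining dwords */
};

static void ste_install(struct ste *dst, const uint64_t payload[3],
			uint64_t data0_val)
{
	/* 1. Write everything apart from dword 0. */
	memcpy(dst->data, payload, sizeof(dst->data));
	/* 2. "Sync": the release store publishes the payload before V. */
	atomic_store_explicit(&dst->data0, data0_val, memory_order_release);
}

static int ste_read(struct ste *src, uint64_t out[3])
{
	/* Readers pair with the release store via an acquire load of V. */
	if (!(atomic_load_explicit(&src->data0, memory_order_acquire) & 1))
		return 0;	/* invalid: payload must not be trusted */
	memcpy(out, src->data, sizeof(src->data));
	return 1;
}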
-static void arm_smmu_init_bypass_stes(struct arm_smmu_ste *strtab,
-				      unsigned int nent, bool force)
+/*
+ * This can safely directly manipulate the STE memory without a sync sequence
+ * because the STE table has not been installed in the SMMU yet.
+ */
+static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,
+				       unsigned int nent)
{
unsigned int i;
-	u64 val = STRTAB_STE_0_V;
-
-	if (disable_bypass && !force)
-		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
-	else
-		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
for (i = 0; i < nent; ++i) {
-		strtab->data[0] = cpu_to_le64(val);
-		strtab->data[1] = cpu_to_le64(FIELD_PREP(
-			STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
-		strtab->data[2] = 0;
+		if (disable_bypass)
+			arm_smmu_make_abort_ste(strtab);
+		else
+			arm_smmu_make_bypass_ste(strtab);
strtab++;
}
}
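[Reviewer note: the comment added above states the key invariant, that plain stores are fine because the table is not yet visible to the SMMU, with ordering only needed when the table base is later published. A minimal standalone sketch of that init-then-publish shape, with hypothetical names (strtab_base, init_and_publish are illustrations, not driver API); the release store is the software analogue of programming the stream table base register afterwards.]

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

struct ste { uint64_t data[4]; };

/* Table base as seen by the hypothetical walker; NULL until published. */
static _Atomic(struct ste *) strtab_base;

static void init_and_publish(struct ste *strtab, size_t nent)
{
	/* Plain stores: nothing can walk the table yet, no syncs needed. */
	for (size_t i = 0; i < nent; i++)
		strtab[i] = (struct ste){ .data = { 0x1 /* V | abort */ } };

	/* Publishing the base is the only step that needs ordering. */
	atomic_store_explicit(&strtab_base, strtab, memory_order_release);
}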
* If prq is to be handled outside iommu driver via receiver of
* the fault notifiers, we skip the page response here.
*/
-		if (!pdev)
+		mutex_lock(&iommu->iopf_lock);
+		dev = device_rbtree_find(iommu, req->rid);
+		if (!dev) {
+			mutex_unlock(&iommu->iopf_lock);
			goto bad_req;
+		}

-		if (intel_svm_prq_report(iommu, &pdev->dev, req))
-			handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
-		else
-			trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
-					 req->priv_data[0], req->priv_data[1],
-					 iommu->prq_seq_number++);
-		pci_dev_put(pdev);
+		intel_svm_prq_report(iommu, dev, req);
+		trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
+				 req->priv_data[0], req->priv_data[1],
+				 iommu->prq_seq_number++);
+		mutex_unlock(&iommu->iopf_lock);
prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
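[Reviewer note: the hunk above replaces the pci_get_domain_bus_and_slot()/pci_dev_put() refcount dance with a lookup in the device rbtree while iommu->iopf_lock is held across the report, so the device cannot go away mid-delivery, and the failure path drops the lock before bailing out. A compact standalone sketch of that locking shape, with hypothetical stand-in types (obj_table, obj_find, report_event are not the driver's API):]

#include <pthread.h>
#include <stddef.h>

struct obj { unsigned int id; };

struct obj_table {
	pthread_mutex_t lock;		/* init with PTHREAD_MUTEX_INITIALIZER */
	struct obj *slots[256];		/* stand-in for the rbtree */
};

/* Lookup done under tbl->lock in this sketch. */
static struct obj *obj_find(struct obj_table *tbl, unsigned int id)
{
	return id < 256 ? tbl->slots[id] : NULL;
}

static int report_event(struct obj_table *tbl, unsigned int id)
{
	struct obj *o;

	pthread_mutex_lock(&tbl->lock);
	o = obj_find(tbl, id);
	if (!o) {
		/* Failure path: drop the lock, then bail ("bad_req"). */
		pthread_mutex_unlock(&tbl->lock);
		return -1;
	}
	/* ... deliver the event while the lock pins the object ... */
	pthread_mutex_unlock(&tbl->lock);
	return 0;
}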
qi_submit_sync(iommu, &desc, 1, 0);
}
-out:
-	return ret;
-}
-
-static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
-				   struct device *dev, ioasid_t pasid)
-{
-	struct device_domain_info *info = dev_iommu_priv_get(dev);
-	struct intel_iommu *iommu = info->iommu;
-
-	return intel_svm_bind_mm(iommu, dev, domain, pasid);
-}
-
static void intel_svm_domain_free(struct iommu_domain *domain)
{
kfree(to_dmar_domain(domain));