/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */
#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"
/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))
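/*
 * Per the SMMU architecture, the global register space is split into
 * equal-sized pages: GR0 occupies the first page and GR1 the second,
 * hence the GR1 offset above being derived from the probed page-size
 * shift rather than a fixed constant.
 */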
/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3
/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58
#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)
/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
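/*
 * Global invalidation is synchronised by writing sTLBGSYNC and then polling
 * sTLBGSTATUS.GSACTIVE until it clears; __arm_smmu_tlb_sync() below does
 * this with a 1us delay per iteration, giving the ~1s timeout above.
 */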
/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)
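/*
 * In stream-matching mode, each SMR holds an id/mask pair that incoming
 * stream IDs are matched against; the S2CR at the same index then routes
 * matching traffic to a context bank, lets it bypass, or makes it fault.
 */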
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff
/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0
#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)
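/*
 * TTBCR2.SEP selects the VA sign-extension bit used when choosing between
 * TTBR0 and TTBR1; the 0x7 encoding (see the SMMUv2 TCR2 description)
 * pushes the split up to the upstream bus size.
 */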
#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)
static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
};
enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
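/*
 * cavium_id_base is zero except on Cavium CN88xx, where erratum #27704
 * requires ASIDs and VMIDs to be unique across every SMMU in the system;
 * elsewhere these macros reduce to cbndx and cbndx + 1 respectively.
 */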
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}
static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}
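/*
 * Note that the masters rb-tree is keyed on the raw device_node pointer,
 * which gives cheap lookups with an arbitrary (but stable) ordering.
 */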
static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}
static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
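/*
 * These callbacks are invoked by the io-pgtable code whenever page-table
 * updates require TLB maintenance: tlb_add_flush() issues the invalidation
 * and tlb_sync() waits for it to complete.
 */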
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            N
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update our support page sizes to reflect the page table format */
	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU.
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}
static void arm_smmu_detach_dev(struct device *dev,
				struct arm_smmu_master_cfg *cfg)
{
	struct iommu_domain *domain = dev->archdata.iommu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (IS_ERR_VALUE(ret))
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev, cfg);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}
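/*
 * When the hardware advertises ARM_SMMU_FEAT_TRANS_OPS, stage-1 lookups
 * above go via the ATS1PR address-translation register, exercising the
 * hardware's own translation path; otherwise we walk the tables in software.
 */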
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}
static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}
static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/* Make sure all context banks are disabled and clear CB_FSR  */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
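/*
 * The 0..5 encodings above (32/36/40/42/44/48 bits) follow the IAS/OAS/UBS
 * fields of the ID2 register; anything larger saturates at 48 bits.
 */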
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	size = 0;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		size |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		size |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		size |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		size |= SZ_64K | SZ_512M;

	arm_smmu_ops.pgsize_bitmap &= size;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}
struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_args masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;
	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec)) {
		err = register_smmu_master(smmu, dev, &masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec.np->name);
			goto out_put_masters;
		}

		i++;
	}
	dev_notice(dev, "registered %d master devices\n", i);

	parse_driver_options(smmu);

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};
static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type))
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
#endif

	return 0;
}
static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");