Commit | Line | Data |
---|---|---|
45ae7cff WD |
1 | /* |
2 | * IOMMU API for ARM architected SMMU implementations. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License version 2 as | |
6 | * published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, | |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
11 | * GNU General Public License for more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License | |
14 | * along with this program; if not, write to the Free Software | |
15 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
16 | * | |
17 | * Copyright (C) 2013 ARM Limited | |
18 | * | |
19 | * Author: Will Deacon <will.deacon@arm.com> | |
20 | * | |
21 | * This driver currently supports: | |
22 | * - SMMUv1 and v2 implementations | |
23 | * - Stream-matching and stream-indexing | |
24 | * - v7/v8 long-descriptor format | |
25 | * - Non-secure access to the SMMU | |
45ae7cff WD |
26 | * - Context fault reporting |
27 | */ | |
28 | ||
29 | #define pr_fmt(fmt) "arm-smmu: " fmt | |
30 | ||
31 | #include <linux/delay.h> | |
32 | #include <linux/dma-mapping.h> | |
33 | #include <linux/err.h> | |
34 | #include <linux/interrupt.h> | |
35 | #include <linux/io.h> | |
36 | #include <linux/iommu.h> | |
859a732e | 37 | #include <linux/iopoll.h> |
45ae7cff WD |
38 | #include <linux/module.h> |
39 | #include <linux/of.h> | |
bae2c2d4 | 40 | #include <linux/of_address.h> |
a9a1b0b5 | 41 | #include <linux/pci.h> |
45ae7cff WD |
42 | #include <linux/platform_device.h> |
43 | #include <linux/slab.h> | |
44 | #include <linux/spinlock.h> | |
45 | ||
46 | #include <linux/amba/bus.h> | |
47 | ||
518f7136 | 48 | #include "io-pgtable.h" |
45ae7cff WD |
49 | |
50 | /* Maximum number of stream IDs assigned to a single device */ | |
636e97b0 | 51 | #define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS |
45ae7cff WD |
52 | |
53 | /* Maximum number of context banks per SMMU */ | |
54 | #define ARM_SMMU_MAX_CBS 128 | |
55 | ||
56 | /* Maximum number of mapping groups per SMMU */ | |
57 | #define ARM_SMMU_MAX_SMRS 128 | |
58 | ||
45ae7cff WD |
59 | /* SMMU global address space */ |
60 | #define ARM_SMMU_GR0(smmu) ((smmu)->base) | |
c757e852 | 61 | #define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift)) |
45ae7cff | 62 | |
3a5df8ff AH |
63 | /* |
64 | * SMMU global address space with conditional offset to access secure | |
65 | * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448, | |
66 | * nsGFSYNR0: 0x450) | |
67 | */ | |
68 | #define ARM_SMMU_GR0_NS(smmu) \ | |
69 | ((smmu)->base + \ | |
70 | ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ | |
71 | ? 0x400 : 0)) | |
72 | ||
45ae7cff WD |
73 | /* Configuration registers */ |
74 | #define ARM_SMMU_GR0_sCR0 0x0 | |
75 | #define sCR0_CLIENTPD (1 << 0) | |
76 | #define sCR0_GFRE (1 << 1) | |
77 | #define sCR0_GFIE (1 << 2) | |
78 | #define sCR0_GCFGFRE (1 << 4) | |
79 | #define sCR0_GCFGFIE (1 << 5) | |
80 | #define sCR0_USFCFG (1 << 10) | |
81 | #define sCR0_VMIDPNE (1 << 11) | |
82 | #define sCR0_PTM (1 << 12) | |
83 | #define sCR0_FB (1 << 13) | |
84 | #define sCR0_BSU_SHIFT 14 | |
85 | #define sCR0_BSU_MASK 0x3 | |
86 | ||
87 | /* Identification registers */ | |
88 | #define ARM_SMMU_GR0_ID0 0x20 | |
89 | #define ARM_SMMU_GR0_ID1 0x24 | |
90 | #define ARM_SMMU_GR0_ID2 0x28 | |
91 | #define ARM_SMMU_GR0_ID3 0x2c | |
92 | #define ARM_SMMU_GR0_ID4 0x30 | |
93 | #define ARM_SMMU_GR0_ID5 0x34 | |
94 | #define ARM_SMMU_GR0_ID6 0x38 | |
95 | #define ARM_SMMU_GR0_ID7 0x3c | |
96 | #define ARM_SMMU_GR0_sGFSR 0x48 | |
97 | #define ARM_SMMU_GR0_sGFSYNR0 0x50 | |
98 | #define ARM_SMMU_GR0_sGFSYNR1 0x54 | |
99 | #define ARM_SMMU_GR0_sGFSYNR2 0x58 | |
45ae7cff WD |
100 | |
101 | #define ID0_S1TS (1 << 30) | |
102 | #define ID0_S2TS (1 << 29) | |
103 | #define ID0_NTS (1 << 28) | |
104 | #define ID0_SMS (1 << 27) | |
859a732e | 105 | #define ID0_ATOSNS (1 << 26) |
45ae7cff WD |
106 | #define ID0_CTTW (1 << 14) |
107 | #define ID0_NUMIRPT_SHIFT 16 | |
108 | #define ID0_NUMIRPT_MASK 0xff | |
3c8766d0 OH |
109 | #define ID0_NUMSIDB_SHIFT 9 |
110 | #define ID0_NUMSIDB_MASK 0xf | |
45ae7cff WD |
111 | #define ID0_NUMSMRG_SHIFT 0 |
112 | #define ID0_NUMSMRG_MASK 0xff | |
113 | ||
114 | #define ID1_PAGESIZE (1 << 31) | |
115 | #define ID1_NUMPAGENDXB_SHIFT 28 | |
116 | #define ID1_NUMPAGENDXB_MASK 7 | |
117 | #define ID1_NUMS2CB_SHIFT 16 | |
118 | #define ID1_NUMS2CB_MASK 0xff | |
119 | #define ID1_NUMCB_SHIFT 0 | |
120 | #define ID1_NUMCB_MASK 0xff | |
121 | ||
122 | #define ID2_OAS_SHIFT 4 | |
123 | #define ID2_OAS_MASK 0xf | |
124 | #define ID2_IAS_SHIFT 0 | |
125 | #define ID2_IAS_MASK 0xf | |
126 | #define ID2_UBS_SHIFT 8 | |
127 | #define ID2_UBS_MASK 0xf | |
128 | #define ID2_PTFS_4K (1 << 12) | |
129 | #define ID2_PTFS_16K (1 << 13) | |
130 | #define ID2_PTFS_64K (1 << 14) | |
131 | ||
45ae7cff | 132 | /* Global TLB invalidation */ |
45ae7cff WD |
133 | #define ARM_SMMU_GR0_TLBIVMID 0x64 |
134 | #define ARM_SMMU_GR0_TLBIALLNSNH 0x68 | |
135 | #define ARM_SMMU_GR0_TLBIALLH 0x6c | |
136 | #define ARM_SMMU_GR0_sTLBGSYNC 0x70 | |
137 | #define ARM_SMMU_GR0_sTLBGSTATUS 0x74 | |
138 | #define sTLBGSTATUS_GSACTIVE (1 << 0) | |
139 | #define TLB_LOOP_TIMEOUT 1000000 /* 1s! */ | |
140 | ||
141 | /* Stream mapping registers */ | |
142 | #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) | |
143 | #define SMR_VALID (1 << 31) | |
144 | #define SMR_MASK_SHIFT 16 | |
145 | #define SMR_MASK_MASK 0x7fff | |
146 | #define SMR_ID_SHIFT 0 | |
147 | #define SMR_ID_MASK 0x7fff | |
148 | ||
149 | #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) | |
150 | #define S2CR_CBNDX_SHIFT 0 | |
151 | #define S2CR_CBNDX_MASK 0xff | |
152 | #define S2CR_TYPE_SHIFT 16 | |
153 | #define S2CR_TYPE_MASK 0x3 | |
154 | #define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT) | |
155 | #define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT) | |
156 | #define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT) | |
157 | ||
158 | /* Context bank attribute registers */ | |
159 | #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) | |
160 | #define CBAR_VMID_SHIFT 0 | |
161 | #define CBAR_VMID_MASK 0xff | |
57ca90f6 WD |
162 | #define CBAR_S1_BPSHCFG_SHIFT 8 |
163 | #define CBAR_S1_BPSHCFG_MASK 3 | |
164 | #define CBAR_S1_BPSHCFG_NSH 3 | |
45ae7cff WD |
165 | #define CBAR_S1_MEMATTR_SHIFT 12 |
166 | #define CBAR_S1_MEMATTR_MASK 0xf | |
167 | #define CBAR_S1_MEMATTR_WB 0xf | |
168 | #define CBAR_TYPE_SHIFT 16 | |
169 | #define CBAR_TYPE_MASK 0x3 | |
170 | #define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT) | |
171 | #define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT) | |
172 | #define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT) | |
173 | #define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT) | |
174 | #define CBAR_IRPTNDX_SHIFT 24 | |
175 | #define CBAR_IRPTNDX_MASK 0xff | |
176 | ||
177 | #define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2)) | |
178 | #define CBA2R_RW64_32BIT (0 << 0) | |
179 | #define CBA2R_RW64_64BIT (1 << 0) | |
180 | ||
181 | /* Translation context bank */ | |
182 | #define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1)) | |
c757e852 | 183 | #define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift)) |
45ae7cff WD |
184 | |
185 | #define ARM_SMMU_CB_SCTLR 0x0 | |
186 | #define ARM_SMMU_CB_RESUME 0x8 | |
187 | #define ARM_SMMU_CB_TTBCR2 0x10 | |
188 | #define ARM_SMMU_CB_TTBR0_LO 0x20 | |
189 | #define ARM_SMMU_CB_TTBR0_HI 0x24 | |
518f7136 WD |
190 | #define ARM_SMMU_CB_TTBR1_LO 0x28 |
191 | #define ARM_SMMU_CB_TTBR1_HI 0x2c | |
45ae7cff WD |
192 | #define ARM_SMMU_CB_TTBCR 0x30 |
193 | #define ARM_SMMU_CB_S1_MAIR0 0x38 | |
518f7136 | 194 | #define ARM_SMMU_CB_S1_MAIR1 0x3c |
859a732e MH |
195 | #define ARM_SMMU_CB_PAR_LO 0x50 |
196 | #define ARM_SMMU_CB_PAR_HI 0x54 | |
45ae7cff WD |
197 | #define ARM_SMMU_CB_FSR 0x58 |
198 | #define ARM_SMMU_CB_FAR_LO 0x60 | |
199 | #define ARM_SMMU_CB_FAR_HI 0x64 | |
200 | #define ARM_SMMU_CB_FSYNR0 0x68 | |
518f7136 | 201 | #define ARM_SMMU_CB_S1_TLBIVA 0x600 |
1463fe44 | 202 | #define ARM_SMMU_CB_S1_TLBIASID 0x610 |
518f7136 WD |
203 | #define ARM_SMMU_CB_S1_TLBIVAL 0x620 |
204 | #define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 | |
205 | #define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638 | |
661d962f | 206 | #define ARM_SMMU_CB_ATS1PR 0x800 |
859a732e | 207 | #define ARM_SMMU_CB_ATSR 0x8f0 |
45ae7cff WD |
208 | |
209 | #define SCTLR_S1_ASIDPNE (1 << 12) | |
210 | #define SCTLR_CFCFG (1 << 7) | |
211 | #define SCTLR_CFIE (1 << 6) | |
212 | #define SCTLR_CFRE (1 << 5) | |
213 | #define SCTLR_E (1 << 4) | |
214 | #define SCTLR_AFE (1 << 2) | |
215 | #define SCTLR_TRE (1 << 1) | |
216 | #define SCTLR_M (1 << 0) | |
217 | #define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE) | |
218 | ||
859a732e MH |
219 | #define CB_PAR_F (1 << 0) |
220 | ||
221 | #define ATSR_ACTIVE (1 << 0) | |
222 | ||
45ae7cff WD |
223 | #define RESUME_RETRY (0 << 0) |
224 | #define RESUME_TERMINATE (1 << 0) | |
225 | ||
45ae7cff | 226 | #define TTBCR2_SEP_SHIFT 15 |
5dc5616e | 227 | #define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT) |
45ae7cff | 228 | |
518f7136 | 229 | #define TTBRn_HI_ASID_SHIFT 16 |
45ae7cff WD |
230 | |
231 | #define FSR_MULTI (1 << 31) | |
232 | #define FSR_SS (1 << 30) | |
233 | #define FSR_UUT (1 << 8) | |
234 | #define FSR_ASF (1 << 7) | |
235 | #define FSR_TLBLKF (1 << 6) | |
236 | #define FSR_TLBMCF (1 << 5) | |
237 | #define FSR_EF (1 << 4) | |
238 | #define FSR_PF (1 << 3) | |
239 | #define FSR_AFF (1 << 2) | |
240 | #define FSR_TF (1 << 1) | |
241 | ||
2907320d MH |
242 | #define FSR_IGN (FSR_AFF | FSR_ASF | \ |
243 | FSR_TLBMCF | FSR_TLBLKF) | |
244 | #define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \ | |
adaba320 | 245 | FSR_EF | FSR_PF | FSR_TF | FSR_IGN) |
45ae7cff WD |
246 | |
247 | #define FSYNR0_WNR (1 << 4) | |
248 | ||
4cf740b0 | 249 | static int force_stage; |
e3ce0c94 | 250 | module_param_named(force_stage, force_stage, int, S_IRUGO); |
4cf740b0 WD |
251 | MODULE_PARM_DESC(force_stage, |
252 | "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation."); | |
253 | ||
09360403 RM |
254 | enum arm_smmu_arch_version { |
255 | ARM_SMMU_V1 = 1, | |
256 | ARM_SMMU_V2, | |
257 | }; | |
258 | ||
45ae7cff WD |
259 | struct arm_smmu_smr { |
260 | u8 idx; | |
261 | u16 mask; | |
262 | u16 id; | |
263 | }; | |
264 | ||
a9a1b0b5 | 265 | struct arm_smmu_master_cfg { |
45ae7cff WD |
266 | int num_streamids; |
267 | u16 streamids[MAX_MASTER_STREAMIDS]; | |
45ae7cff WD |
268 | struct arm_smmu_smr *smrs; |
269 | }; | |
270 | ||
a9a1b0b5 WD |
271 | struct arm_smmu_master { |
272 | struct device_node *of_node; | |
a9a1b0b5 WD |
273 | struct rb_node node; |
274 | struct arm_smmu_master_cfg cfg; | |
275 | }; | |
276 | ||
45ae7cff WD |
277 | struct arm_smmu_device { |
278 | struct device *dev; | |
45ae7cff WD |
279 | |
280 | void __iomem *base; | |
281 | unsigned long size; | |
c757e852 | 282 | unsigned long pgshift; |
45ae7cff WD |
283 | |
284 | #define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0) | |
285 | #define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1) | |
286 | #define ARM_SMMU_FEAT_TRANS_S1 (1 << 2) | |
287 | #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) | |
288 | #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) | |
859a732e | 289 | #define ARM_SMMU_FEAT_TRANS_OPS (1 << 5) |
45ae7cff | 290 | u32 features; |
3a5df8ff AH |
291 | |
292 | #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) | |
293 | u32 options; | |
09360403 | 294 | enum arm_smmu_arch_version version; |
45ae7cff WD |
295 | |
296 | u32 num_context_banks; | |
297 | u32 num_s2_context_banks; | |
298 | DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS); | |
299 | atomic_t irptndx; | |
300 | ||
301 | u32 num_mapping_groups; | |
302 | DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS); | |
303 | ||
518f7136 WD |
304 | unsigned long va_size; |
305 | unsigned long ipa_size; | |
306 | unsigned long pa_size; | |
45ae7cff WD |
307 | |
308 | u32 num_global_irqs; | |
309 | u32 num_context_irqs; | |
310 | unsigned int *irqs; | |
311 | ||
45ae7cff WD |
312 | struct list_head list; |
313 | struct rb_root masters; | |
314 | }; | |
315 | ||
316 | struct arm_smmu_cfg { | |
45ae7cff WD |
317 | u8 cbndx; |
318 | u8 irptndx; | |
319 | u32 cbar; | |
45ae7cff | 320 | }; |
faea13b7 | 321 | #define INVALID_IRPTNDX 0xff |
45ae7cff | 322 | |
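/*
 * ASIDs and VMIDs are tied 1:1 to the owning context bank, so no separate
 * allocator is needed; the VMID is simply offset by one so that it is
 * never zero.
 */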
ecfadb6e WD |
323 | #define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) |
324 | #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) | |
325 | ||
c752ce45 WD |
326 | enum arm_smmu_domain_stage { |
327 | ARM_SMMU_DOMAIN_S1 = 0, | |
328 | ARM_SMMU_DOMAIN_S2, | |
329 | ARM_SMMU_DOMAIN_NESTED, | |
330 | }; | |
331 | ||
45ae7cff | 332 | struct arm_smmu_domain { |
44680eed | 333 | struct arm_smmu_device *smmu; |
518f7136 WD |
334 | struct io_pgtable_ops *pgtbl_ops; |
335 | spinlock_t pgtbl_lock; | |
44680eed | 336 | struct arm_smmu_cfg cfg; |
c752ce45 | 337 | enum arm_smmu_domain_stage stage; |
518f7136 | 338 | struct mutex init_mutex; /* Protects smmu pointer */ |
1d672638 | 339 | struct iommu_domain domain; |
45ae7cff WD |
340 | }; |
341 | ||
518f7136 WD |
342 | static struct iommu_ops arm_smmu_ops; |
343 | ||
45ae7cff WD |
344 | static DEFINE_SPINLOCK(arm_smmu_devices_lock); |
345 | static LIST_HEAD(arm_smmu_devices); | |
346 | ||
3a5df8ff AH |
347 | struct arm_smmu_option_prop { |
348 | u32 opt; | |
349 | const char *prop; | |
350 | }; | |
351 | ||
2907320d | 352 | static struct arm_smmu_option_prop arm_smmu_options[] = { |
3a5df8ff AH |
353 | { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, |
354 | { 0, NULL}, | |
355 | }; | |
356 | ||
1d672638 JR |
357 | static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) |
358 | { | |
359 | return container_of(dom, struct arm_smmu_domain, domain); | |
360 | } | |
361 | ||
3a5df8ff AH |
362 | static void parse_driver_options(struct arm_smmu_device *smmu) |
363 | { | |
364 | int i = 0; | |
2907320d | 365 | |
3a5df8ff AH |
366 | do { |
367 | if (of_property_read_bool(smmu->dev->of_node, | |
368 | arm_smmu_options[i].prop)) { | |
369 | smmu->options |= arm_smmu_options[i].opt; | |
370 | dev_notice(smmu->dev, "option %s\n", | |
371 | arm_smmu_options[i].prop); | |
372 | } | |
373 | } while (arm_smmu_options[++i].opt); | |
374 | } | |
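/*
 * Illustrative DT fragment (node name, unit address and compatible string
 * are assumptions, not taken from a real board file): a platform opting in
 * to the Calxeda secure-register workaround adds the bare boolean property
 * to its SMMU node, which of_property_read_bool() above then picks up:
 *
 *	smmu@fff00000 {
 *		compatible = "arm,smmu-v1";
 *		...
 *		calxeda,smmu-secure-config-access;
 *	};
 */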
375 | ||
8f68f8e2 | 376 | static struct device_node *dev_get_dev_node(struct device *dev) |
a9a1b0b5 WD |
377 | { |
378 | if (dev_is_pci(dev)) { | |
379 | struct pci_bus *bus = to_pci_dev(dev)->bus; | |
2907320d | 380 | |
a9a1b0b5 WD |
381 | while (!pci_is_root_bus(bus)) |
382 | bus = bus->parent; | |
8f68f8e2 | 383 | return bus->bridge->parent->of_node; |
a9a1b0b5 WD |
384 | } |
385 | ||
8f68f8e2 | 386 | return dev->of_node; |
a9a1b0b5 WD |
387 | } |
388 | ||
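/*
 * Masters registered against an SMMU live in a per-instance rb-tree keyed
 * by the raw device_node pointer, so the lookup and insertion routines
 * below compare of_node pointers rather than names.
 */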
45ae7cff WD |
389 | static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, |
390 | struct device_node *dev_node) | |
391 | { | |
392 | struct rb_node *node = smmu->masters.rb_node; | |
393 | ||
394 | while (node) { | |
395 | struct arm_smmu_master *master; | |
2907320d | 396 | |
45ae7cff WD |
397 | master = container_of(node, struct arm_smmu_master, node); |
398 | ||
399 | if (dev_node < master->of_node) | |
400 | node = node->rb_left; | |
401 | else if (dev_node > master->of_node) | |
402 | node = node->rb_right; | |
403 | else | |
404 | return master; | |
405 | } | |
406 | ||
407 | return NULL; | |
408 | } | |
409 | ||
a9a1b0b5 | 410 | static struct arm_smmu_master_cfg * |
8f68f8e2 | 411 | find_smmu_master_cfg(struct device *dev) |
a9a1b0b5 | 412 | { |
8f68f8e2 WD |
413 | struct arm_smmu_master_cfg *cfg = NULL; |
414 | struct iommu_group *group = iommu_group_get(dev); | |
a9a1b0b5 | 415 | |
8f68f8e2 WD |
416 | if (group) { |
417 | cfg = iommu_group_get_iommudata(group); | |
418 | iommu_group_put(group); | |
419 | } | |
a9a1b0b5 | 420 | |
8f68f8e2 | 421 | return cfg; |
a9a1b0b5 WD |
422 | } |
423 | ||
45ae7cff WD |
424 | static int insert_smmu_master(struct arm_smmu_device *smmu, |
425 | struct arm_smmu_master *master) | |
426 | { | |
427 | struct rb_node **new, *parent; | |
428 | ||
429 | new = &smmu->masters.rb_node; | |
430 | parent = NULL; | |
431 | while (*new) { | |
2907320d MH |
432 | struct arm_smmu_master *this |
433 | = container_of(*new, struct arm_smmu_master, node); | |
45ae7cff WD |
434 | |
435 | parent = *new; | |
436 | if (master->of_node < this->of_node) | |
437 | new = &((*new)->rb_left); | |
438 | else if (master->of_node > this->of_node) | |
439 | new = &((*new)->rb_right); | |
440 | else | |
441 | return -EEXIST; | |
442 | } | |
443 | ||
444 | rb_link_node(&master->node, parent, new); | |
445 | rb_insert_color(&master->node, &smmu->masters); | |
446 | return 0; | |
447 | } | |
448 | ||
449 | static int register_smmu_master(struct arm_smmu_device *smmu, | |
450 | struct device *dev, | |
451 | struct of_phandle_args *masterspec) | |
452 | { | |
453 | int i; | |
454 | struct arm_smmu_master *master; | |
455 | ||
456 | master = find_smmu_master(smmu, masterspec->np); | |
457 | if (master) { | |
458 | dev_err(dev, | |
459 | "rejecting multiple registrations for master device %s\n", | |
460 | masterspec->np->name); | |
461 | return -EBUSY; | |
462 | } | |
463 | ||
464 | if (masterspec->args_count > MAX_MASTER_STREAMIDS) { | |
465 | dev_err(dev, | |
466 | "reached maximum number (%d) of stream IDs for master device %s\n", | |
467 | MAX_MASTER_STREAMIDS, masterspec->np->name); | |
468 | return -ENOSPC; | |
469 | } | |
470 | ||
471 | master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL); | |
472 | if (!master) | |
473 | return -ENOMEM; | |
474 | ||
a9a1b0b5 WD |
475 | master->of_node = masterspec->np; |
476 | master->cfg.num_streamids = masterspec->args_count; | |
45ae7cff | 477 | |
3c8766d0 OH |
478 | for (i = 0; i < master->cfg.num_streamids; ++i) { |
479 | u16 streamid = masterspec->args[i]; | |
45ae7cff | 480 | |
3c8766d0 OH |
481 | if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && |
482 | (streamid >= smmu->num_mapping_groups)) { | |
483 | dev_err(dev, | |
484 | "stream ID for master device %s greater than maximum allowed (%d)\n", | |
485 | masterspec->np->name, smmu->num_mapping_groups); | |
486 | return -ERANGE; | |
487 | } | |
488 | master->cfg.streamids[i] = streamid; | |
489 | } | |
45ae7cff WD |
490 | return insert_smmu_master(smmu, master); |
491 | } | |
492 | ||
44680eed | 493 | static struct arm_smmu_device *find_smmu_for_device(struct device *dev) |
45ae7cff | 494 | { |
44680eed | 495 | struct arm_smmu_device *smmu; |
a9a1b0b5 | 496 | struct arm_smmu_master *master = NULL; |
8f68f8e2 | 497 | struct device_node *dev_node = dev_get_dev_node(dev); |
45ae7cff WD |
498 | |
499 | spin_lock(&arm_smmu_devices_lock); | |
44680eed | 500 | list_for_each_entry(smmu, &arm_smmu_devices, list) { |
a9a1b0b5 WD |
501 | master = find_smmu_master(smmu, dev_node); |
502 | if (master) | |
503 | break; | |
504 | } | |
45ae7cff | 505 | spin_unlock(&arm_smmu_devices_lock); |
44680eed | 506 | |
a9a1b0b5 | 507 | return master ? smmu : NULL; |
45ae7cff WD |
508 | } |
509 | ||
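/*
 * Claim a free index in [start, end). The find/test-and-set loop below
 * tolerates concurrent allocators without taking a lock, and returns
 * -ENOSPC once the range is exhausted.
 */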
510 | static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) | |
511 | { | |
512 | int idx; | |
513 | ||
514 | do { | |
515 | idx = find_next_zero_bit(map, end, start); | |
516 | if (idx == end) | |
517 | return -ENOSPC; | |
518 | } while (test_and_set_bit(idx, map)); | |
519 | ||
520 | return idx; | |
521 | } | |
522 | ||
523 | static void __arm_smmu_free_bitmap(unsigned long *map, int idx) | |
524 | { | |
525 | clear_bit(idx, map); | |
526 | } | |
527 | ||
528 | /* Wait for any pending TLB invalidations to complete */ | |
518f7136 | 529 | static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu) |
45ae7cff WD |
530 | { |
531 | int count = 0; | |
532 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | |
533 | ||
534 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC); | |
535 | while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS) | |
536 | & sTLBGSTATUS_GSACTIVE) { | |
537 | cpu_relax(); | |
538 | if (++count == TLB_LOOP_TIMEOUT) { | |
539 | dev_err_ratelimited(smmu->dev, | |
540 | "TLB sync timed out -- SMMU may be deadlocked\n"); | |
541 | return; | |
542 | } | |
543 | udelay(1); | |
544 | } | |
545 | } | |
546 | ||
518f7136 WD |
547 | static void arm_smmu_tlb_sync(void *cookie) |
548 | { | |
549 | struct arm_smmu_domain *smmu_domain = cookie; | |
550 | __arm_smmu_tlb_sync(smmu_domain->smmu); | |
551 | } | |
552 | ||
553 | static void arm_smmu_tlb_inv_context(void *cookie) | |
1463fe44 | 554 | { |
518f7136 | 555 | struct arm_smmu_domain *smmu_domain = cookie; |
44680eed WD |
556 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
557 | struct arm_smmu_device *smmu = smmu_domain->smmu; | |
1463fe44 | 558 | bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; |
518f7136 | 559 | void __iomem *base; |
1463fe44 WD |
560 | |
561 | if (stage1) { | |
562 | base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | |
ecfadb6e WD |
563 | writel_relaxed(ARM_SMMU_CB_ASID(cfg), |
564 | base + ARM_SMMU_CB_S1_TLBIASID); | |
1463fe44 WD |
565 | } else { |
566 | base = ARM_SMMU_GR0(smmu); | |
ecfadb6e WD |
567 | writel_relaxed(ARM_SMMU_CB_VMID(cfg), |
568 | base + ARM_SMMU_GR0_TLBIVMID); | |
1463fe44 WD |
569 | } |
570 | ||
518f7136 WD |
571 | __arm_smmu_tlb_sync(smmu); |
572 | } | |
573 | ||
574 | static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, | |
575 | bool leaf, void *cookie) | |
576 | { | |
577 | struct arm_smmu_domain *smmu_domain = cookie; | |
578 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | |
579 | struct arm_smmu_device *smmu = smmu_domain->smmu; | |
580 | bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; | |
581 | void __iomem *reg; | |
582 | ||
583 | if (stage1) { | |
584 | reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | |
585 | reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; | |
586 | ||
587 | if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) { | |
588 | iova &= ~12UL; | |
589 | iova |= ARM_SMMU_CB_ASID(cfg); | |
590 | writel_relaxed(iova, reg); | |
591 | #ifdef CONFIG_64BIT | |
592 | } else { | |
593 | iova >>= 12; | |
594 | iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48; | |
595 | writeq_relaxed(iova, reg); | |
596 | #endif | |
597 | } | |
598 | #ifdef CONFIG_64BIT | |
599 | } else if (smmu->version == ARM_SMMU_V2) { | |
600 | reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | |
601 | reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : | |
602 | ARM_SMMU_CB_S2_TLBIIPAS2; | |
603 | writeq_relaxed(iova >> 12, reg); | |
604 | #endif | |
605 | } else { | |
606 | reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID; | |
607 | writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg); | |
608 | } | |
609 | } | |
610 | ||
611 | static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie) | |
612 | { | |
613 | struct arm_smmu_domain *smmu_domain = cookie; | |
614 | struct arm_smmu_device *smmu = smmu_domain->smmu; | |
615 | unsigned long offset = (unsigned long)addr & ~PAGE_MASK; | |
616 | ||
617 | ||
618 | /* Ensure new page tables are visible to the hardware walker */ | |
619 | if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { | |
620 | dsb(ishst); | |
621 | } else { | |
622 | /* | |
623 | * If the SMMU can't walk tables in the CPU caches, treat them | |
624 | * like non-coherent DMA since we need to flush the new entries | |
625 | * all the way out to memory. There's no possibility of | |
626 | * recursion here as the SMMU table walker will not be wired | |
627 | * through another SMMU. | |
628 | */ | |
629 | dma_map_page(smmu->dev, virt_to_page(addr), offset, size, | |
630 | DMA_TO_DEVICE); | |
631 | } | |
1463fe44 WD |
632 | } |
633 | ||
518f7136 WD |
634 | static struct iommu_gather_ops arm_smmu_gather_ops = { |
635 | .tlb_flush_all = arm_smmu_tlb_inv_context, | |
636 | .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, | |
637 | .tlb_sync = arm_smmu_tlb_sync, | |
638 | .flush_pgtable = arm_smmu_flush_pgtable, | |
639 | }; | |
640 | ||
45ae7cff WD |
641 | static irqreturn_t arm_smmu_context_fault(int irq, void *dev) |
642 | { | |
643 | int flags, ret; | |
644 | u32 fsr, far, fsynr, resume; | |
645 | unsigned long iova; | |
646 | struct iommu_domain *domain = dev; | |
1d672638 | 647 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
44680eed WD |
648 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
649 | struct arm_smmu_device *smmu = smmu_domain->smmu; | |
45ae7cff WD |
650 | void __iomem *cb_base; |
651 | ||
44680eed | 652 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); |
45ae7cff WD |
653 | fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); |
654 | ||
655 | if (!(fsr & FSR_FAULT)) | |
656 | return IRQ_NONE; | |
657 | ||
658 | if (fsr & FSR_IGN) | |
659 | dev_err_ratelimited(smmu->dev, | |
70c9a7db | 660 | "Unexpected context fault (fsr 0x%x)\n", |
45ae7cff WD |
661 | fsr); |
662 | ||
663 | fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); | |
664 | flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; | |
665 | ||
666 | far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO); | |
667 | iova = far; | |
668 | #ifdef CONFIG_64BIT | |
669 | far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI); | |
670 | iova |= ((unsigned long)far << 32); | |
671 | #endif | |
672 | ||
673 | if (!report_iommu_fault(domain, smmu->dev, iova, flags)) { | |
674 | ret = IRQ_HANDLED; | |
675 | resume = RESUME_RETRY; | |
676 | } else { | |
2ef0f031 AH |
677 | dev_err_ratelimited(smmu->dev, |
678 | "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n", | |
44680eed | 679 | iova, fsynr, cfg->cbndx); |
45ae7cff WD |
680 | ret = IRQ_NONE; |
681 | resume = RESUME_TERMINATE; | |
682 | } | |
683 | ||
684 | /* Clear the faulting FSR */ | |
685 | writel(fsr, cb_base + ARM_SMMU_CB_FSR); | |
686 | ||
687 | /* Retry or terminate any stalled transactions */ | |
688 | if (fsr & FSR_SS) | |
689 | writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME); | |
690 | ||
691 | return ret; | |
692 | } | |
693 | ||
694 | static irqreturn_t arm_smmu_global_fault(int irq, void *dev) | |
695 | { | |
696 | u32 gfsr, gfsynr0, gfsynr1, gfsynr2; | |
697 | struct arm_smmu_device *smmu = dev; | |
3a5df8ff | 698 | void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu); |
45ae7cff WD |
699 | |
700 | gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR); | |
701 | gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0); | |
702 | gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1); | |
703 | gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2); | |
704 | ||
3a5df8ff AH |
705 | if (!gfsr) |
706 | return IRQ_NONE; | |
707 | ||
45ae7cff WD |
708 | dev_err_ratelimited(smmu->dev, |
709 | "Unexpected global fault, this could be serious\n"); | |
710 | dev_err_ratelimited(smmu->dev, | |
711 | "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n", | |
712 | gfsr, gfsynr0, gfsynr1, gfsynr2); | |
713 | ||
714 | writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR); | |
adaba320 | 715 | return IRQ_HANDLED; |
45ae7cff WD |
716 | } |
717 | ||
518f7136 WD |
718 | static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, |
719 | struct io_pgtable_cfg *pgtbl_cfg) | |
45ae7cff WD |
720 | { |
721 | u32 reg; | |
722 | bool stage1; | |
44680eed WD |
723 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
724 | struct arm_smmu_device *smmu = smmu_domain->smmu; | |
45ae7cff WD |
725 | void __iomem *cb_base, *gr0_base, *gr1_base; |
726 | ||
727 | gr0_base = ARM_SMMU_GR0(smmu); | |
728 | gr1_base = ARM_SMMU_GR1(smmu); | |
44680eed WD |
729 | stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; |
730 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | |
45ae7cff | 731 | |
4a1c93cb WD |
732 | if (smmu->version > ARM_SMMU_V1) { |
733 | /* | |
734 | * CBA2R. | |
735 | * *Must* be initialised before CBAR thanks to a VMID16
736 | * architectural oversight affecting some implementations.
737 | */ | |
738 | #ifdef CONFIG_64BIT | |
739 | reg = CBA2R_RW64_64BIT; | |
740 | #else | |
741 | reg = CBA2R_RW64_32BIT; | |
742 | #endif | |
743 | writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); | |
744 | } | |
745 | ||
45ae7cff | 746 | /* CBAR */ |
44680eed | 747 | reg = cfg->cbar; |
09360403 | 748 | if (smmu->version == ARM_SMMU_V1) |
2907320d | 749 | reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT; |
45ae7cff | 750 | |
57ca90f6 WD |
751 | /* |
752 | * Use the weakest shareability/memory types, so they are | |
753 | * overridden by the ttbcr/pte. | |
754 | */ | |
755 | if (stage1) { | |
756 | reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) | | |
757 | (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); | |
758 | } else { | |
44680eed | 759 | reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT; |
57ca90f6 | 760 | } |
44680eed | 761 | writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)); |
45ae7cff | 762 | |
518f7136 WD |
763 | /* TTBRs */ |
764 | if (stage1) { | |
765 | reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; | |
766 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); | |
767 | reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32; | |
44680eed | 768 | reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; |
518f7136 | 769 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); |
45ae7cff | 770 | |
518f7136 WD |
771 | reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; |
772 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO); | |
773 | reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32; | |
774 | reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; | |
775 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI); | |
776 | } else { | |
777 | reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; | |
778 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); | |
779 | reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32; | |
780 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); | |
781 | } | |
a65217a4 | 782 | |
518f7136 WD |
783 | /* TTBCR */ |
784 | if (stage1) { | |
785 | reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr; | |
786 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); | |
787 | if (smmu->version > ARM_SMMU_V1) { | |
788 | reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; | |
5dc5616e | 789 | reg |= TTBCR2_SEP_UPSTREAM; |
518f7136 | 790 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); |
45ae7cff WD |
791 | } |
792 | } else { | |
518f7136 WD |
793 | reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; |
794 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); | |
45ae7cff WD |
795 | } |
796 | ||
518f7136 | 797 | /* MAIRs (stage-1 only) */ |
45ae7cff | 798 | if (stage1) { |
518f7136 | 799 | reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; |
45ae7cff | 800 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); |
518f7136 WD |
801 | reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1]; |
802 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1); | |
45ae7cff WD |
803 | } |
804 | ||
45ae7cff WD |
805 | /* SCTLR */ |
806 | reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; | |
807 | if (stage1) | |
808 | reg |= SCTLR_S1_ASIDPNE; | |
809 | #ifdef __BIG_ENDIAN | |
810 | reg |= SCTLR_E; | |
811 | #endif | |
25724841 | 812 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR); |
45ae7cff WD |
813 | } |
814 | ||
815 | static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |
44680eed | 816 | struct arm_smmu_device *smmu) |
45ae7cff | 817 | { |
a18037b2 | 818 | int irq, start, ret = 0; |
518f7136 WD |
819 | unsigned long ias, oas; |
820 | struct io_pgtable_ops *pgtbl_ops; | |
821 | struct io_pgtable_cfg pgtbl_cfg; | |
822 | enum io_pgtable_fmt fmt; | |
1d672638 | 823 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
44680eed | 824 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
45ae7cff | 825 | |
518f7136 | 826 | mutex_lock(&smmu_domain->init_mutex); |
a18037b2 MH |
827 | if (smmu_domain->smmu) |
828 | goto out_unlock; | |
829 | ||
c752ce45 WD |
830 | /* |
831 | * Mapping the requested stage onto what we support is surprisingly | |
832 | * complicated, mainly because the spec allows S1+S2 SMMUs without | |
833 | * support for nested translation. That means we end up with the | |
834 | * following table: | |
835 | * | |
836 | * Requested Supported Actual | |
837 | * S1 N S1 | |
838 | * S1 S1+S2 S1 | |
839 | * S1 S2 S2 | |
840 | * S1 S1 S1 | |
841 | * N N N | |
842 | * N S1+S2 S2 | |
843 | * N S2 S2 | |
844 | * N S1 S1 | |
845 | * | |
846 | * Note that you can't actually request stage-2 mappings. | |
847 | */ | |
848 | if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) | |
849 | smmu_domain->stage = ARM_SMMU_DOMAIN_S2; | |
850 | if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) | |
851 | smmu_domain->stage = ARM_SMMU_DOMAIN_S1; | |
852 | ||
853 | switch (smmu_domain->stage) { | |
854 | case ARM_SMMU_DOMAIN_S1: | |
855 | cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; | |
856 | start = smmu->num_s2_context_banks; | |
518f7136 WD |
857 | ias = smmu->va_size; |
858 | oas = smmu->ipa_size; | |
859 | if (IS_ENABLED(CONFIG_64BIT)) | |
860 | fmt = ARM_64_LPAE_S1; | |
861 | else | |
862 | fmt = ARM_32_LPAE_S1; | |
c752ce45 WD |
863 | break; |
864 | case ARM_SMMU_DOMAIN_NESTED: | |
45ae7cff WD |
865 | /* |
866 | * We will likely want to change this if/when KVM gets | |
867 | * involved. | |
868 | */ | |
c752ce45 | 869 | case ARM_SMMU_DOMAIN_S2: |
9c5c92e3 WD |
870 | cfg->cbar = CBAR_TYPE_S2_TRANS; |
871 | start = 0; | |
518f7136 WD |
872 | ias = smmu->ipa_size; |
873 | oas = smmu->pa_size; | |
874 | if (IS_ENABLED(CONFIG_64BIT)) | |
875 | fmt = ARM_64_LPAE_S2; | |
876 | else | |
877 | fmt = ARM_32_LPAE_S2; | |
c752ce45 WD |
878 | break; |
879 | default: | |
880 | ret = -EINVAL; | |
881 | goto out_unlock; | |
45ae7cff WD |
882 | } |
883 | ||
884 | ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, | |
885 | smmu->num_context_banks); | |
886 | if (IS_ERR_VALUE(ret)) | |
a18037b2 | 887 | goto out_unlock; |
45ae7cff | 888 | |
44680eed | 889 | cfg->cbndx = ret; |
09360403 | 890 | if (smmu->version == ARM_SMMU_V1) { |
44680eed WD |
891 | cfg->irptndx = atomic_inc_return(&smmu->irptndx); |
892 | cfg->irptndx %= smmu->num_context_irqs; | |
45ae7cff | 893 | } else { |
44680eed | 894 | cfg->irptndx = cfg->cbndx; |
45ae7cff WD |
895 | } |
896 | ||
518f7136 WD |
897 | pgtbl_cfg = (struct io_pgtable_cfg) { |
898 | .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap, | |
899 | .ias = ias, | |
900 | .oas = oas, | |
901 | .tlb = &arm_smmu_gather_ops, | |
902 | }; | |
903 | ||
904 | smmu_domain->smmu = smmu; | |
905 | pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); | |
906 | if (!pgtbl_ops) { | |
907 | ret = -ENOMEM; | |
908 | goto out_clear_smmu; | |
909 | } | |
910 | ||
911 | /* Update our support page sizes to reflect the page table format */ | |
912 | arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; | |
a18037b2 | 913 | |
518f7136 WD |
914 | /* Initialise the context bank with our page table cfg */ |
915 | arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg); | |
916 | ||
917 | /* | |
918 | * Request context fault interrupt. Do this last to avoid the | |
919 | * handler seeing a half-initialised domain state. | |
920 | */ | |
44680eed | 921 | irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; |
45ae7cff WD |
922 | ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, |
923 | "arm-smmu-context-fault", domain); | |
924 | if (IS_ERR_VALUE(ret)) { | |
925 | dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", | |
44680eed WD |
926 | cfg->irptndx, irq); |
927 | cfg->irptndx = INVALID_IRPTNDX; | |
45ae7cff WD |
928 | } |
929 | ||
518f7136 WD |
930 | mutex_unlock(&smmu_domain->init_mutex); |
931 | ||
932 | /* Publish page table ops for map/unmap */ | |
933 | smmu_domain->pgtbl_ops = pgtbl_ops; | |
a9a1b0b5 | 934 | return 0; |
45ae7cff | 935 | |
518f7136 WD |
936 | out_clear_smmu: |
937 | smmu_domain->smmu = NULL; | |
a18037b2 | 938 | out_unlock: |
518f7136 | 939 | mutex_unlock(&smmu_domain->init_mutex); |
45ae7cff WD |
940 | return ret; |
941 | } | |
942 | ||
943 | static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) | |
944 | { | |
1d672638 | 945 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
44680eed WD |
946 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
947 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | |
1463fe44 | 948 | void __iomem *cb_base; |
45ae7cff WD |
949 | int irq; |
950 | ||
951 | if (!smmu) | |
952 | return; | |
953 | ||
518f7136 WD |
954 | /* |
955 | * Disable the context bank and free the page tables before freeing
956 | * the domain.
957 | */ | |
44680eed | 958 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); |
1463fe44 | 959 | writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); |
1463fe44 | 960 | |
44680eed WD |
961 | if (cfg->irptndx != INVALID_IRPTNDX) { |
962 | irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; | |
45ae7cff WD |
963 | free_irq(irq, domain); |
964 | } | |
965 | ||
518f7136 WD |
966 | if (smmu_domain->pgtbl_ops) |
967 | free_io_pgtable_ops(smmu_domain->pgtbl_ops); | |
968 | ||
44680eed | 969 | __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); |
45ae7cff WD |
970 | } |
971 | ||
1d672638 | 972 | static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) |
45ae7cff WD |
973 | { |
974 | struct arm_smmu_domain *smmu_domain; | |
45ae7cff | 975 | |
1d672638 JR |
976 | if (type != IOMMU_DOMAIN_UNMANAGED) |
977 | return NULL; | |
45ae7cff WD |
978 | /* |
979 | * Allocate the domain and initialise some of its data structures. | |
980 | * We can't really do anything meaningful until we've added a | |
981 | * master. | |
982 | */ | |
983 | smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); | |
984 | if (!smmu_domain) | |
1d672638 | 985 | return NULL; |
45ae7cff | 986 | |
518f7136 WD |
987 | mutex_init(&smmu_domain->init_mutex); |
988 | spin_lock_init(&smmu_domain->pgtbl_lock); | |
1d672638 JR |
989 | |
990 | return &smmu_domain->domain; | |
45ae7cff WD |
991 | } |
992 | ||
1d672638 | 993 | static void arm_smmu_domain_free(struct iommu_domain *domain) |
45ae7cff | 994 | { |
1d672638 | 995 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
1463fe44 WD |
996 | |
997 | /* | |
998 | * Free the domain resources. We assume that all devices have | |
999 | * already been detached. | |
1000 | */ | |
45ae7cff | 1001 | arm_smmu_destroy_domain_context(domain); |
45ae7cff WD |
1002 | kfree(smmu_domain); |
1003 | } | |
1004 | ||
1005 | static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, | |
a9a1b0b5 | 1006 | struct arm_smmu_master_cfg *cfg) |
45ae7cff WD |
1007 | { |
1008 | int i; | |
1009 | struct arm_smmu_smr *smrs; | |
1010 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | |
1011 | ||
1012 | if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH)) | |
1013 | return 0; | |
1014 | ||
a9a1b0b5 | 1015 | if (cfg->smrs) |
45ae7cff WD |
1016 | return -EEXIST; |
1017 | ||
2907320d | 1018 | smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL); |
45ae7cff | 1019 | if (!smrs) { |
a9a1b0b5 WD |
1020 | dev_err(smmu->dev, "failed to allocate %d SMRs\n", |
1021 | cfg->num_streamids); | |
45ae7cff WD |
1022 | return -ENOMEM; |
1023 | } | |
1024 | ||
44680eed | 1025 | /* Allocate the SMRs on the SMMU */ |
a9a1b0b5 | 1026 | for (i = 0; i < cfg->num_streamids; ++i) { |
45ae7cff WD |
1027 | int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0, |
1028 | smmu->num_mapping_groups); | |
1029 | if (IS_ERR_VALUE(idx)) { | |
1030 | dev_err(smmu->dev, "failed to allocate free SMR\n"); | |
1031 | goto err_free_smrs; | |
1032 | } | |
1033 | ||
1034 | smrs[i] = (struct arm_smmu_smr) { | |
1035 | .idx = idx, | |
1036 | .mask = 0, /* We don't currently share SMRs */ | |
a9a1b0b5 | 1037 | .id = cfg->streamids[i], |
45ae7cff WD |
1038 | }; |
1039 | } | |
1040 | ||
1041 | /* It worked! Now, poke the actual hardware */ | |
a9a1b0b5 | 1042 | for (i = 0; i < cfg->num_streamids; ++i) { |
45ae7cff WD |
1043 | u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT | |
1044 | smrs[i].mask << SMR_MASK_SHIFT; | |
1045 | writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx)); | |
1046 | } | |
1047 | ||
a9a1b0b5 | 1048 | cfg->smrs = smrs; |
45ae7cff WD |
1049 | return 0; |
1050 | ||
1051 | err_free_smrs: | |
1052 | while (--i >= 0) | |
1053 | __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx); | |
1054 | kfree(smrs); | |
1055 | return -ENOSPC; | |
1056 | } | |
1057 | ||
1058 | static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu, | |
a9a1b0b5 | 1059 | struct arm_smmu_master_cfg *cfg) |
45ae7cff WD |
1060 | { |
1061 | int i; | |
1062 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | |
a9a1b0b5 | 1063 | struct arm_smmu_smr *smrs = cfg->smrs; |
45ae7cff | 1064 | |
43b412be WD |
1065 | if (!smrs) |
1066 | return; | |
1067 | ||
45ae7cff | 1068 | /* Invalidate the SMRs before freeing back to the allocator */ |
a9a1b0b5 | 1069 | for (i = 0; i < cfg->num_streamids; ++i) { |
45ae7cff | 1070 | u8 idx = smrs[i].idx; |
2907320d | 1071 | |
45ae7cff WD |
1072 | writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx)); |
1073 | __arm_smmu_free_bitmap(smmu->smr_map, idx); | |
1074 | } | |
1075 | ||
a9a1b0b5 | 1076 | cfg->smrs = NULL; |
45ae7cff WD |
1077 | kfree(smrs); |
1078 | } | |
1079 | ||
45ae7cff | 1080 | static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, |
a9a1b0b5 | 1081 | struct arm_smmu_master_cfg *cfg) |
45ae7cff WD |
1082 | { |
1083 | int i, ret; | |
44680eed | 1084 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
45ae7cff WD |
1085 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); |
1086 | ||
8f68f8e2 | 1087 | /* Devices in an IOMMU group may already be configured */ |
a9a1b0b5 | 1088 | ret = arm_smmu_master_configure_smrs(smmu, cfg); |
45ae7cff | 1089 | if (ret) |
8f68f8e2 | 1090 | return ret == -EEXIST ? 0 : ret; |
45ae7cff | 1091 | |
a9a1b0b5 | 1092 | for (i = 0; i < cfg->num_streamids; ++i) { |
45ae7cff | 1093 | u32 idx, s2cr; |
2907320d | 1094 | |
a9a1b0b5 | 1095 | idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i]; |
6069d23c | 1096 | s2cr = S2CR_TYPE_TRANS | |
44680eed | 1097 | (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT); |
45ae7cff WD |
1098 | writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx)); |
1099 | } | |
1100 | ||
1101 | return 0; | |
1102 | } | |
1103 | ||
1104 | static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, | |
a9a1b0b5 | 1105 | struct arm_smmu_master_cfg *cfg) |
45ae7cff | 1106 | { |
43b412be | 1107 | int i; |
44680eed | 1108 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
43b412be | 1109 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); |
45ae7cff | 1110 | |
8f68f8e2 WD |
1111 | /* An IOMMU group is torn down by the first device to be removed */ |
1112 | if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs) | |
1113 | return; | |
45ae7cff WD |
1114 | |
1115 | /* | |
1116 | * We *must* clear the S2CR first, because freeing the SMR means | |
1117 | * that it can be re-allocated immediately. | |
1118 | */ | |
43b412be WD |
1119 | for (i = 0; i < cfg->num_streamids; ++i) { |
1120 | u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i]; | |
1121 | ||
1122 | writel_relaxed(S2CR_TYPE_BYPASS, | |
1123 | gr0_base + ARM_SMMU_GR0_S2CR(idx)); | |
1124 | } | |
1125 | ||
a9a1b0b5 | 1126 | arm_smmu_master_free_smrs(smmu, cfg); |
45ae7cff WD |
1127 | } |
1128 | ||
1129 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |
1130 | { | |
a18037b2 | 1131 | int ret; |
1d672638 | 1132 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
518f7136 | 1133 | struct arm_smmu_device *smmu; |
a9a1b0b5 | 1134 | struct arm_smmu_master_cfg *cfg; |
45ae7cff | 1135 | |
8f68f8e2 | 1136 | smmu = find_smmu_for_device(dev); |
44680eed | 1137 | if (!smmu) { |
45ae7cff WD |
1138 | dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); |
1139 | return -ENXIO; | |
1140 | } | |
1141 | ||
844e35bd WD |
1142 | if (dev->archdata.iommu) { |
1143 | dev_err(dev, "already attached to IOMMU domain\n"); | |
1144 | return -EEXIST; | |
1145 | } | |
1146 | ||
518f7136 WD |
1147 | /* Ensure that the domain is finalised */ |
1148 | ret = arm_smmu_init_domain_context(domain, smmu); | |
1149 | if (IS_ERR_VALUE(ret)) | |
1150 | return ret; | |
1151 | ||
45ae7cff | 1152 | /* |
44680eed WD |
1153 | * Sanity check the domain. We don't support domains across |
1154 | * different SMMUs. | |
45ae7cff | 1155 | */ |
518f7136 | 1156 | if (smmu_domain->smmu != smmu) { |
45ae7cff WD |
1157 | dev_err(dev, |
1158 | "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", | |
a18037b2 MH |
1159 | dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); |
1160 | return -EINVAL; | |
45ae7cff | 1161 | } |
45ae7cff WD |
1162 | |
1163 | /* Looks ok, so add the device to the domain */ | |
8f68f8e2 | 1164 | cfg = find_smmu_master_cfg(dev); |
a9a1b0b5 | 1165 | if (!cfg) |
45ae7cff WD |
1166 | return -ENODEV; |
1167 | ||
844e35bd WD |
1168 | ret = arm_smmu_domain_add_master(smmu_domain, cfg); |
1169 | if (!ret) | |
1170 | dev->archdata.iommu = domain; | |
45ae7cff WD |
1171 | return ret; |
1172 | } | |
1173 | ||
1174 | static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) | |
1175 | { | |
1d672638 | 1176 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
a9a1b0b5 | 1177 | struct arm_smmu_master_cfg *cfg; |
45ae7cff | 1178 | |
8f68f8e2 | 1179 | cfg = find_smmu_master_cfg(dev); |
844e35bd WD |
1180 | if (!cfg) |
1181 | return; | |
1182 | ||
1183 | dev->archdata.iommu = NULL; | |
1184 | arm_smmu_domain_remove_master(smmu_domain, cfg); | |
45ae7cff WD |
1185 | } |
1186 | ||
45ae7cff | 1187 | static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, |
b410aed9 | 1188 | phys_addr_t paddr, size_t size, int prot) |
45ae7cff | 1189 | { |
518f7136 WD |
1190 | int ret; |
1191 | unsigned long flags; | |
1d672638 | 1192 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
518f7136 | 1193 | struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
45ae7cff | 1194 | |
518f7136 | 1195 | if (!ops) |
45ae7cff WD |
1196 | return -ENODEV; |
1197 | ||
518f7136 WD |
1198 | spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); |
1199 | ret = ops->map(ops, iova, paddr, size, prot); | |
1200 | spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); | |
1201 | return ret; | |
45ae7cff WD |
1202 | } |
1203 | ||
1204 | static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, | |
1205 | size_t size) | |
1206 | { | |
518f7136 WD |
1207 | size_t ret; |
1208 | unsigned long flags; | |
1d672638 | 1209 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
518f7136 | 1210 | struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
45ae7cff | 1211 | |
518f7136 WD |
1212 | if (!ops) |
1213 | return 0; | |
1214 | ||
1215 | spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); | |
1216 | ret = ops->unmap(ops, iova, size); | |
1217 | spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); | |
1218 | return ret; | |
45ae7cff WD |
1219 | } |
1220 | ||
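/*
 * Resolve an IOVA by asking the hardware itself to translate it through
 * the stage-1 address translation registers (ATS1PR/ATSR/PAR), falling
 * back to a software page-table walk if the operation times out.
 */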
859a732e MH |
1221 | static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, |
1222 | dma_addr_t iova) | |
1223 | { | |
1d672638 | 1224 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
859a732e MH |
1225 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
1226 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | |
1227 | struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1228 | struct device *dev = smmu->dev; | |
1229 | void __iomem *cb_base; | |
1230 | u32 tmp; | |
1231 | u64 phys; | |
661d962f | 1232 | unsigned long va; |
859a732e MH |
1233 | |
1234 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | |
1235 | ||
661d962f RM |
1236 | /* ATS1 registers can only be written atomically */ |
1237 | va = iova & ~0xfffUL; | |
1238 | #ifdef CONFIG_64BIT | |
1239 | if (smmu->version == ARM_SMMU_V2) | |
1240 | writeq_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR); | |
1241 | else | |
1242 | #endif | |
1243 | writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR); | |
859a732e MH |
1244 | |
1245 | if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp, | |
1246 | !(tmp & ATSR_ACTIVE), 5, 50)) { | |
1247 | dev_err(dev, | |
1248 | "iova to phys timed out on 0x%pad. Falling back to software table walk.\n", | |
1249 | &iova); | |
1250 | return ops->iova_to_phys(ops, iova); | |
1251 | } | |
1252 | ||
1253 | phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO); | |
1254 | phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32; | |
1255 | ||
1256 | if (phys & CB_PAR_F) { | |
1257 | dev_err(dev, "translation fault!\n"); | |
1258 | dev_err(dev, "PAR = 0x%llx\n", phys); | |
1259 | return 0; | |
1260 | } | |
1261 | ||
1262 | return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff); | |
1263 | } | |
1264 | ||
45ae7cff | 1265 | static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, |
859a732e | 1266 | dma_addr_t iova) |
45ae7cff | 1267 | { |
518f7136 WD |
1268 | phys_addr_t ret; |
1269 | unsigned long flags; | |
1d672638 | 1270 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
518f7136 | 1271 | struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
45ae7cff | 1272 | |
518f7136 | 1273 | if (!ops) |
a44a9791 | 1274 | return 0; |
45ae7cff | 1275 | |
518f7136 | 1276 | spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); |
83a60ed8 BR |
1277 | if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS && |
1278 | smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { | |
859a732e | 1279 | ret = arm_smmu_iova_to_phys_hard(domain, iova); |
83a60ed8 | 1280 | } else { |
859a732e | 1281 | ret = ops->iova_to_phys(ops, iova); |
83a60ed8 BR |
1282 | } |
1283 | ||
518f7136 | 1284 | spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); |
859a732e | 1285 | |
518f7136 | 1286 | return ret; |
45ae7cff WD |
1287 | } |
1288 | ||
1fd0c775 | 1289 | static bool arm_smmu_capable(enum iommu_cap cap) |
45ae7cff | 1290 | { |
d0948945 WD |
1291 | switch (cap) { |
1292 | case IOMMU_CAP_CACHE_COHERENCY: | |
1fd0c775 JR |
1293 | /* |
1294 | * Return true here as the SMMU can always send out coherent | |
1295 | * requests. | |
1296 | */ | |
1297 | return true; | |
d0948945 | 1298 | case IOMMU_CAP_INTR_REMAP: |
1fd0c775 | 1299 | return true; /* MSIs are just memory writes */ |
0029a8dd AM |
1300 | case IOMMU_CAP_NOEXEC: |
1301 | return true; | |
d0948945 | 1302 | default: |
1fd0c775 | 1303 | return false; |
d0948945 | 1304 | } |
45ae7cff | 1305 | } |
45ae7cff | 1306 | |
a9a1b0b5 WD |
1307 | static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data) |
1308 | { | |
1309 | *((u16 *)data) = alias; | |
1310 | return 0; /* Continue walking */ | |
45ae7cff WD |
1311 | } |
1312 | ||
8f68f8e2 WD |
1313 | static void __arm_smmu_release_pci_iommudata(void *data) |
1314 | { | |
1315 | kfree(data); | |
1316 | } | |
1317 | ||
03edb226 | 1318 | static int arm_smmu_add_pci_device(struct pci_dev *pdev) |
45ae7cff | 1319 | { |
03edb226 WD |
1320 | int i, ret; |
1321 | u16 sid; | |
5fc63a7c | 1322 | struct iommu_group *group; |
03edb226 | 1323 | struct arm_smmu_master_cfg *cfg; |
45ae7cff | 1324 | |
03edb226 WD |
1325 | group = iommu_group_get_for_dev(&pdev->dev); |
1326 | if (IS_ERR(group)) | |
5fc63a7c | 1327 | return PTR_ERR(group); |
a9a1b0b5 | 1328 | |
03edb226 WD |
1329 | cfg = iommu_group_get_iommudata(group); |
1330 | if (!cfg) { | |
a9a1b0b5 WD |
1331 | cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); |
1332 | if (!cfg) { | |
1333 | ret = -ENOMEM; | |
1334 | goto out_put_group; | |
1335 | } | |
1336 | ||
03edb226 WD |
1337 | iommu_group_set_iommudata(group, cfg, |
1338 | __arm_smmu_release_pci_iommudata); | |
1339 | } | |
8f68f8e2 | 1340 | |
03edb226 WD |
1341 | if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) { |
1342 | ret = -ENOSPC; | |
1343 | goto out_put_group; | |
a9a1b0b5 WD |
1344 | } |
1345 | ||
03edb226 WD |
1346 | /* |
1347 | * Assume Stream ID == Requester ID for now. | |
1348 | * We need a way to describe the ID mappings in FDT. | |
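 * (With that assumption, a function at bus 1, devfn 0 ends up with
 * sid 0x0100, i.e. the 16-bit Requester ID bus[15:8]:devfn[7:0].)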
1349 | */ | |
1350 | pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid); | |
1351 | for (i = 0; i < cfg->num_streamids; ++i) | |
1352 | if (cfg->streamids[i] == sid) | |
1353 | break; | |
1354 | ||
1355 | /* Avoid duplicate SIDs, as this can lead to SMR conflicts */ | |
1356 | if (i == cfg->num_streamids) | |
1357 | cfg->streamids[cfg->num_streamids++] = sid; | |
5fc63a7c | 1358 | |
03edb226 | 1359 | return 0; |
a9a1b0b5 WD |
1360 | out_put_group: |
1361 | iommu_group_put(group); | |
5fc63a7c | 1362 | return ret; |
45ae7cff WD |
1363 | } |
1364 | ||
03edb226 WD |
1365 | static int arm_smmu_add_platform_device(struct device *dev) |
1366 | { | |
1367 | struct iommu_group *group; | |
1368 | struct arm_smmu_master *master; | |
1369 | struct arm_smmu_device *smmu = find_smmu_for_device(dev); | |
1370 | ||
1371 | if (!smmu) | |
1372 | return -ENODEV; | |
1373 | ||
1374 | master = find_smmu_master(smmu, dev->of_node); | |
1375 | if (!master) | |
1376 | return -ENODEV; | |
1377 | ||
1378 | /* No automatic group creation for platform devices */ | |
1379 | group = iommu_group_alloc(); | |
1380 | if (IS_ERR(group)) | |
1381 | return PTR_ERR(group); | |
1382 | ||
1383 | iommu_group_set_iommudata(group, &master->cfg, NULL); | |
1384 | return iommu_group_add_device(group, dev); | |
1385 | } | |
1386 | ||
1387 | static int arm_smmu_add_device(struct device *dev) | |
1388 | { | |
1389 | if (dev_is_pci(dev)) | |
1390 | return arm_smmu_add_pci_device(to_pci_dev(dev)); | |
1391 | ||
1392 | return arm_smmu_add_platform_device(dev); | |
1393 | } | |
1394 | ||
45ae7cff WD |
1395 | static void arm_smmu_remove_device(struct device *dev) |
1396 | { | |
5fc63a7c | 1397 | iommu_group_remove_device(dev); |
45ae7cff WD |
1398 | } |
1399 | ||
c752ce45 WD |
1400 | static int arm_smmu_domain_get_attr(struct iommu_domain *domain, |
1401 | enum iommu_attr attr, void *data) | |
1402 | { | |
1d672638 | 1403 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
c752ce45 WD |
1404 | |
1405 | switch (attr) { | |
1406 | case DOMAIN_ATTR_NESTING: | |
1407 | *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); | |
1408 | return 0; | |
1409 | default: | |
1410 | return -ENODEV; | |
1411 | } | |
1412 | } | |
1413 | ||
1414 | static int arm_smmu_domain_set_attr(struct iommu_domain *domain, | |
1415 | enum iommu_attr attr, void *data) | |
1416 | { | |
518f7136 | 1417 | int ret = 0; |
1d672638 | 1418 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
c752ce45 | 1419 | |
518f7136 WD |
1420 | mutex_lock(&smmu_domain->init_mutex); |
1421 | ||
c752ce45 WD |
1422 | switch (attr) { |
1423 | case DOMAIN_ATTR_NESTING: | |
518f7136 WD |
1424 | if (smmu_domain->smmu) { |
1425 | ret = -EPERM; | |
1426 | goto out_unlock; | |
1427 | } | |
1428 | ||
c752ce45 WD |
1429 | if (*(int *)data) |
1430 | smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; | |
1431 | else | |
1432 | smmu_domain->stage = ARM_SMMU_DOMAIN_S1; | |
1433 | ||
518f7136 | 1434 | break; |
c752ce45 | 1435 | default: |
518f7136 | 1436 | ret = -ENODEV; |
c752ce45 | 1437 | } |
518f7136 WD |
1438 | |
1439 | out_unlock: | |
1440 | mutex_unlock(&smmu_domain->init_mutex); | |
1441 | return ret; | |
c752ce45 WD |
1442 | } |
1443 | ||
518f7136 | 1444 | static struct iommu_ops arm_smmu_ops = { |
c752ce45 | 1445 | .capable = arm_smmu_capable, |
1d672638 JR |
1446 | .domain_alloc = arm_smmu_domain_alloc, |
1447 | .domain_free = arm_smmu_domain_free, | |
c752ce45 WD |
1448 | .attach_dev = arm_smmu_attach_dev, |
1449 | .detach_dev = arm_smmu_detach_dev, | |
1450 | .map = arm_smmu_map, | |
1451 | .unmap = arm_smmu_unmap, | |
76771c93 | 1452 | .map_sg = default_iommu_map_sg, |
c752ce45 WD |
1453 | .iova_to_phys = arm_smmu_iova_to_phys, |
1454 | .add_device = arm_smmu_add_device, | |
1455 | .remove_device = arm_smmu_remove_device, | |
1456 | .domain_get_attr = arm_smmu_domain_get_attr, | |
1457 | .domain_set_attr = arm_smmu_domain_set_attr, | |
518f7136 | 1458 | .pgsize_bitmap = -1UL, /* Restricted during device attach */ |
45ae7cff WD |
1459 | }; |
1460 | ||
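/*
 * The ops above are exercised through the generic IOMMU API rather than
 * called directly. A minimal sketch (hypothetical "dev", "phys" and IOVA,
 * assuming the device is a platform device already registered as a master
 * of this SMMU):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (domain && !iommu_attach_device(domain, dev))
 *		iommu_map(domain, 0x10000000, phys, SZ_64K,
 *			  IOMMU_READ | IOMMU_WRITE);
 *
 * iommu_domain_alloc() reaches arm_smmu_domain_alloc(), the attach
 * finalises a context bank via arm_smmu_init_domain_context(), and
 * iommu_map() lands in arm_smmu_map() above.
 */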
1461 | static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | |
1462 | { | |
1463 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | |
659db6f6 | 1464 | void __iomem *cb_base; |
45ae7cff | 1465 | int i = 0; |
659db6f6 AH |
1466 | u32 reg; |
1467 | ||
3a5df8ff AH |
1468 | /* Clear the global FSR */
1469 | reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); | |
1470 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); | |
45ae7cff WD |
1471 | |
1472 | /* Mark all SMRn as invalid and all S2CRn as bypass */ | |
1473 | for (i = 0; i < smmu->num_mapping_groups; ++i) { | |
3c8766d0 | 1474 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i)); |
2907320d MH |
1475 | writel_relaxed(S2CR_TYPE_BYPASS, |
1476 | gr0_base + ARM_SMMU_GR0_S2CR(i)); | |
45ae7cff WD |
1477 | } |
1478 | ||
659db6f6 AH |
1479 | /* Make sure all context banks are disabled and clear CB_FSR */ |
1480 | for (i = 0; i < smmu->num_context_banks; ++i) { | |
1481 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i); | |
1482 | writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); | |
1483 | writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR); | |
1484 | } | |
1463fe44 | 1485 | |
45ae7cff | 1486 | /* Invalidate the TLB, just in case */ |
45ae7cff WD |
1487 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); |
1488 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); | |
1489 | ||
3a5df8ff | 1490 | reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
659db6f6 | 1491 | |
45ae7cff | 1492 | /* Enable fault reporting */ |
659db6f6 | 1493 | reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE); |
45ae7cff WD |
1494 | |
1495 | /* Disable TLB broadcasting. */ | |
659db6f6 | 1496 | reg |= (sCR0_VMIDPNE | sCR0_PTM); |
45ae7cff WD |
1497 | |
1498 | /* Enable client access, but bypass when no mapping is found */ | |
659db6f6 | 1499 | reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG); |
45ae7cff WD |
1500 | |
1501 | /* Disable forced broadcasting */ | |
659db6f6 | 1502 | reg &= ~sCR0_FB; |
45ae7cff WD |
1503 | |
1504 | /* Don't upgrade barriers */ | |
659db6f6 | 1505 | reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); |
45ae7cff WD |
1506 | |
1507 | /* Push the button */ | |
518f7136 | 1508 | __arm_smmu_tlb_sync(smmu); |
3a5df8ff | 1509 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
45ae7cff WD |
1510 | } |
1511 | ||
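/* Translate the 3-bit address size fields of the ID registers into bits */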
static int arm_smmu_id_size_to_bits(int size)
{
        switch (size) {
        case 0:
                return 32;
        case 1:
                return 36;
        case 2:
                return 40;
        case 3:
                return 42;
        case 4:
                return 44;
        case 5:
        default:
                return 48;
        }
}

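/*
 * Read the ID0/ID1/ID2 registers to discover which translation stages,
 * stream matching resources, context banks, address sizes and page sizes
 * this particular implementation supports.
 */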
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
        unsigned long size;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        u32 id;
        bool cttw_dt, cttw_reg;

        dev_notice(smmu->dev, "probing hardware configuration...\n");
        dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);

        /* ID0 */
        id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

        /* Restrict available stages based on module parameter */
        if (force_stage == 1)
                id &= ~(ID0_S2TS | ID0_NTS);
        else if (force_stage == 2)
                id &= ~(ID0_S1TS | ID0_NTS);

        if (id & ID0_S1TS) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
                dev_notice(smmu->dev, "\tstage 1 translation\n");
        }

        if (id & ID0_S2TS) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
                dev_notice(smmu->dev, "\tstage 2 translation\n");
        }

        if (id & ID0_NTS) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
                dev_notice(smmu->dev, "\tnested translation\n");
        }

        if (!(smmu->features &
              (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
                dev_err(smmu->dev, "\tno translation support!\n");
                return -ENODEV;
        }

        if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
                dev_notice(smmu->dev, "\taddress translation ops\n");
        }

        /*
         * In order for DMA API calls to work properly, we must defer to what
         * the DT says about coherency, regardless of what the hardware claims.
         * Fortunately, this also opens up a workaround for systems where the
         * ID register value has ended up configured incorrectly.
         */
        cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
        cttw_reg = !!(id & ID0_CTTW);
        if (cttw_dt)
                smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
        if (cttw_dt || cttw_reg)
                dev_notice(smmu->dev, "\t%scoherent table walk\n",
                           cttw_dt ? "" : "non-");
        if (cttw_dt != cttw_reg)
                dev_notice(smmu->dev,
                           "\t(IDR0.CTTW overridden by dma-coherent property)\n");

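        /*
         * Probe the implemented SMR mask and ID bits by writing an all-ones
         * pattern to SMR0 and reading it back: the writable mask bits must
         * be able to cover every readable ID bit, otherwise the probe fails.
         */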
        if (id & ID0_SMS) {
                u32 smr, sid, mask;

                smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
                smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
                                           ID0_NUMSMRG_MASK;
                if (smmu->num_mapping_groups == 0) {
                        dev_err(smmu->dev,
                                "stream-matching supported, but no SMRs present!\n");
                        return -ENODEV;
                }

                smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
                smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
                writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
                smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

                mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
                sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
                if ((mask & sid) != sid) {
                        dev_err(smmu->dev,
                                "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
                                mask, sid);
                        return -ENODEV;
                }

                dev_notice(smmu->dev,
                           "\tstream matching with %u register groups, mask 0x%x",
                           smmu->num_mapping_groups, mask);
        } else {
                smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
                                           ID0_NUMSIDB_MASK;
        }

        /* ID1 */
        id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
        smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

        /* Check for size mismatch of SMMU address space from mapped region */
        size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
        size *= 2 << smmu->pgshift;
        if (smmu->size != size)
                dev_warn(smmu->dev,
                         "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
                         size, smmu->size);

        smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
        smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
        if (smmu->num_s2_context_banks > smmu->num_context_banks) {
                dev_err(smmu->dev, "impossible number of S2 context banks!\n");
                return -ENODEV;
        }
        dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
                   smmu->num_context_banks, smmu->num_s2_context_banks);

        /* ID2 */
        id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
        size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
        smmu->ipa_size = size;

        /* The output mask is also applied for bypass */
        size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
        smmu->pa_size = size;

        /*
         * What the page table walker can address actually depends on which
         * descriptor format is in use, but since a) we don't know that yet,
         * and b) it can vary per context bank, this will have to do...
         */
        if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
                dev_warn(smmu->dev,
                         "failed to set DMA mask for table walker\n");

        if (smmu->version == ARM_SMMU_V1) {
                smmu->va_size = smmu->ipa_size;
                size = SZ_4K | SZ_2M | SZ_1G;
        } else {
                size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
                smmu->va_size = arm_smmu_id_size_to_bits(size);
#ifndef CONFIG_64BIT
                smmu->va_size = min(32UL, smmu->va_size);
#endif
                size = 0;
                if (id & ID2_PTFS_4K)
                        size |= SZ_4K | SZ_2M | SZ_1G;
                if (id & ID2_PTFS_16K)
                        size |= SZ_16K | SZ_32M;
                if (id & ID2_PTFS_64K)
                        size |= SZ_64K | SZ_512M;
        }

        arm_smmu_ops.pgsize_bitmap &= size;
        dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);

        if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
                dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
                           smmu->va_size, smmu->ipa_size);

        if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
                dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
                           smmu->ipa_size, smmu->pa_size);

        return 0;
}

static const struct of_device_id arm_smmu_of_match[] = {
        { .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
        { .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
        { .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
        { .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
        { .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
        { },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

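/*
 * For reference, a minimal (purely illustrative) device-tree fragment of the
 * kind this probe routine consumes; the address, interrupt specifiers and
 * stream IDs below are made-up example values:
 *
 *      smmu@ba5e0000 {
 *              compatible = "arm,smmu-v2";
 *              reg = <0xba5e0000 0x10000>;
 *              #global-interrupts = <1>;
 *              interrupts = <0 32 4>, <0 33 4>, <0 34 4>;
 *              mmu-masters = <&dma0 0xd01d 0xd01e>;
 *      };
 *
 * The first #global-interrupts entries of "interrupts" are the global fault
 * interrupts; the remainder are context interrupts. Each "mmu-masters" entry
 * pairs a master's phandle (whose node must define #stream-id-cells) with
 * that master's stream IDs.
 */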
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
        const struct of_device_id *of_id;
        struct resource *res;
        struct arm_smmu_device *smmu;
        struct device *dev = &pdev->dev;
        struct rb_node *node;
        struct of_phandle_args masterspec;
        int num_irqs, i, err;

        smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
        if (!smmu) {
                dev_err(dev, "failed to allocate arm_smmu_device\n");
                return -ENOMEM;
        }
        smmu->dev = dev;

        of_id = of_match_node(arm_smmu_of_match, dev->of_node);
        smmu->version = (enum arm_smmu_arch_version)of_id->data;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        smmu->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(smmu->base))
                return PTR_ERR(smmu->base);
        smmu->size = resource_size(res);

        if (of_property_read_u32(dev->of_node, "#global-interrupts",
                                 &smmu->num_global_irqs)) {
                dev_err(dev, "missing #global-interrupts property\n");
                return -ENODEV;
        }

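        /*
         * The global fault interrupts come first in the "interrupts"
         * property; anything beyond the first num_global_irqs entries is
         * counted as a context interrupt.
         */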
        num_irqs = 0;
        while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
                num_irqs++;
                if (num_irqs > smmu->num_global_irqs)
                        smmu->num_context_irqs++;
        }

        if (!smmu->num_context_irqs) {
                dev_err(dev, "found %d interrupts but expected at least %d\n",
                        num_irqs, smmu->num_global_irqs + 1);
                return -ENODEV;
        }

        smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
                                  GFP_KERNEL);
        if (!smmu->irqs) {
                dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
                return -ENOMEM;
        }

        for (i = 0; i < num_irqs; ++i) {
                int irq = platform_get_irq(pdev, i);

                if (irq < 0) {
                        dev_err(dev, "failed to get irq index %d\n", i);
                        return -ENODEV;
                }
                smmu->irqs[i] = irq;
        }

        err = arm_smmu_device_cfg_probe(smmu);
        if (err)
                return err;

        i = 0;
        smmu->masters = RB_ROOT;
        while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
                                           "#stream-id-cells", i,
                                           &masterspec)) {
                err = register_smmu_master(smmu, dev, &masterspec);
                if (err) {
                        dev_err(dev, "failed to add master %s\n",
                                masterspec.np->name);
                        goto out_put_masters;
                }

                i++;
        }
        dev_notice(dev, "registered %d master devices\n", i);

        parse_driver_options(smmu);

        if (smmu->version > ARM_SMMU_V1 &&
            smmu->num_context_banks != smmu->num_context_irqs) {
                dev_err(dev,
                        "found only %d context interrupt(s) but %d required\n",
                        smmu->num_context_irqs, smmu->num_context_banks);
                err = -ENODEV;
                goto out_put_masters;
        }

        for (i = 0; i < smmu->num_global_irqs; ++i) {
                err = request_irq(smmu->irqs[i],
                                  arm_smmu_global_fault,
                                  IRQF_SHARED,
                                  "arm-smmu global fault",
                                  smmu);
                if (err) {
                        dev_err(dev, "failed to request global IRQ %d (%u)\n",
                                i, smmu->irqs[i]);
                        goto out_free_irqs;
                }
        }

        INIT_LIST_HEAD(&smmu->list);
        spin_lock(&arm_smmu_devices_lock);
        list_add(&smmu->list, &arm_smmu_devices);
        spin_unlock(&arm_smmu_devices_lock);

        arm_smmu_device_reset(smmu);
        return 0;

out_free_irqs:
        while (i--)
                free_irq(smmu->irqs[i], smmu);

out_put_masters:
        for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
                struct arm_smmu_master *master
                        = container_of(node, struct arm_smmu_master, node);
                of_node_put(master->of_node);
        }

        return err;
}

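/*
 * Tear-down path: drop the master references and global IRQs, then set
 * sCR0.CLIENTPD so that client transactions bypass the (now unmanaged) SMMU.
 */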
static int arm_smmu_device_remove(struct platform_device *pdev)
{
        int i;
        struct device *dev = &pdev->dev;
        struct arm_smmu_device *curr, *smmu = NULL;
        struct rb_node *node;

        spin_lock(&arm_smmu_devices_lock);
        list_for_each_entry(curr, &arm_smmu_devices, list) {
                if (curr->dev == dev) {
                        smmu = curr;
                        list_del(&smmu->list);
                        break;
                }
        }
        spin_unlock(&arm_smmu_devices_lock);

        if (!smmu)
                return -ENODEV;

        for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
                struct arm_smmu_master *master
                        = container_of(node, struct arm_smmu_master, node);
                of_node_put(master->of_node);
        }

        if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
                dev_err(dev, "removing device with active domains!\n");

        for (i = 0; i < smmu->num_global_irqs; ++i)
                free_irq(smmu->irqs[i], smmu);

        /* Turn the thing off */
        writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
        return 0;
}

static struct platform_driver arm_smmu_driver = {
        .driver = {
                .name           = "arm-smmu",
                .of_match_table = of_match_ptr(arm_smmu_of_match),
        },
        .probe  = arm_smmu_device_dt_probe,
        .remove = arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
        struct device_node *np;
        int ret;

        /*
         * Play nice with systems that don't have an ARM SMMU by checking that
         * an ARM SMMU exists in the system before proceeding with the driver
         * and IOMMU bus operation registration.
         */
        np = of_find_matching_node(NULL, arm_smmu_of_match);
        if (!np)
                return 0;

        of_node_put(np);

        ret = platform_driver_register(&arm_smmu_driver);
        if (ret)
                return ret;

        /* Oh, for a proper bus abstraction */
        if (!iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
        if (!iommu_present(&amba_bustype))
                bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
        if (!iommu_present(&pci_bus_type))
                bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
#endif

        return 0;
}

static void __exit arm_smmu_exit(void)
{
        return platform_driver_unregister(&arm_smmu_driver);
}

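/*
 * subsys_initcall() runs before the normal device/module initcalls, so when
 * the driver is built in, the SMMU (and the bus iommu_ops it installs) is set
 * up before the masters behind it start probing.
 */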
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");