/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

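/*
 * A transaction's StreamID matches the SMR above when it equals SMR.ID on
 * every bit position that is clear in SMR.MASK (set mask bits are ignored).
 * As an illustrative sketch, a single SMR could match a contiguous block of
 * 16 stream IDs starting at 0x40:
 *
 *	u32 smr = SMR_VALID | (0xf << SMR_MASK_SHIFT) | (0x40 << SMR_ID_SHIFT);
 *
 * This driver currently programs mask = 0, i.e. exact matches only.
 */
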
#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0_LO		0x20
#define ARM_SMMU_CB_TTBR0_HI		0x24
#define ARM_SMMU_CB_TTBR1_LO		0x28
#define ARM_SMMU_CB_TTBR1_HI		0x2c
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR_LO		0x50
#define ARM_SMMU_CB_PAR_HI		0x54
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR_LO		0x800
#define ARM_SMMU_CB_ATS1PR_HI		0x804
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_MASK			0x7

#define TTBCR2_ADDR_32			0
#define TTBCR2_ADDR_36			1
#define TTBCR2_ADDR_40			2
#define TTBCR2_ADDR_42			3
#define TTBCR2_ADDR_44			4
#define TTBCR2_ADDR_48			5

#define TTBRn_HI_ASID_SHIFT		16

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param_named(force_stage, force_stage, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");

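/*
 * For example, booting with arm-smmu.force_stage=2 on the kernel command
 * line restricts the parameter above to stage-2 mappings (and thereby
 * disables nested translation support).
 */
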
enum arm_smmu_arch_version {
	ARM_SMMU_V1 = 1,
	ARM_SMMU_V2,
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)

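/*
 * As the macros above show, ASIDs and VMIDs are derived 1:1 from the
 * context bank index, so no separate allocator is needed and each live
 * context is guaranteed a unique value on its SMMU.
 */
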
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

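/*
 * Each SMMU keeps an rbtree of masters keyed simply on the device_node
 * pointer value: the ordering is arbitrary but stable, which is all that
 * the lookup and insertion routines below require.
 */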
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

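/*
 * Allocate a free index from @map without holding a lock: if another CPU
 * wins the race and sets the bit first, test_and_set_bit() returns 1 and
 * we simply rescan from @start until we either claim a bit or run out.
 */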
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

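/*
 * By-VA invalidation encodes more than the address: in the 32-bit (and
 * SMMUv1) register format the ASID is folded into the low bits of the
 * written word, while the 64-bit SMMUv2 format carries the ASID in bits
 * [63:48]. Stage-2 contexts invalidate by IPA on SMMUv2 and otherwise
 * fall back to nuking the whole VMID.
 */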
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
			iova &= ~0xfffUL;	/* page-align before ORing in the ASID */
			iova |= ARM_SMMU_CB_ASID(cfg);
			writel_relaxed(iova, reg);
#ifdef CONFIG_64BIT
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
			writeq_relaxed(iova, reg);
#endif
		}
#ifdef CONFIG_64BIT
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		writeq_relaxed(iova >> 12, reg);
#endif
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
	}
}

static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;

	/* Ensure new page tables are visible to the hardware walker */
	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
		dsb(ishst);
	} else {
		/*
		 * If the SMMU can't walk tables in the CPU caches, treat them
		 * like non-coherent DMA since we need to flush the new entries
		 * all the way out to memory. There's no possibility of
		 * recursion here as the SMMU table walker will not be wired
		 * through another SMMU.
		 */
		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
			     DMA_TO_DEVICE);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.flush_pgtable	= arm_smmu_flush_pgtable,
};

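/*
 * The gather ops above are not called directly by this driver: the generic
 * io-pgtable code invokes them whenever it updates a page table, so that
 * new entries become visible to the walker and stale TLB entries are
 * dropped.
 */
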
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr0_base, *gr1_base;

	gr0_base = ARM_SMMU_GR0(smmu);
	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		/*
		 * CBA2R.
		 * *Must* be initialised before CBAR, thanks to a VMID16
		 * architectural oversight affecting some implementations.
		 */
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version == ARM_SMMU_V1)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else {
		reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32;
		reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);

		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32;
		reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			switch (smmu->va_size) {
			case 32:
				reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
				break;
			case 36:
				reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
				break;
			case 40:
				reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
				break;
			case 42:
				reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
				break;
			case 44:
				reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
				break;
			case 48:
				reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
				break;
			}
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 *	Requested	Supported	Actual
	 *	S1		N		S1
	 *	S1		S1+S2		S1
	 *	S1		S2		S2
	 *	S1		S1		S1
	 *	N		N		N
	 *	N		S1+S2		S2
	 *	N		S2		S2
	 *	N		S1		S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S1;
		else
			fmt = ARM_32_LPAE_S1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S2;
		else
			fmt = ARM_32_LPAE_S2;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version == ARM_SMMU_V1) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the supported page sizes to reflect the page table format */
	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * the domain itself.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	if (smmu_domain->pgtbl_ops)
		free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static int arm_smmu_domain_init(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return -ENOMEM;

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	domain->priv = smmu_domain;
	return 0;
}

static void arm_smmu_domain_destroy(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];

		writel_relaxed(S2CR_TYPE_BYPASS,
			       gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	if (dev->archdata.iommu) {
		dev_err(dev, "already attached to IOMMU domain\n");
		return -EEXIST;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (IS_ERR_VALUE(ret))
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}

static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_master_cfg *cfg;

	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return;

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

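/*
 * Resolve an IOVA by asking the SMMU itself to perform the walk: write the
 * page-aligned address to ATS1PR, poll ATSR until the translation
 * completes, then read the resulting physical address (or a fault flag)
 * back from PAR.
 */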
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version == ARM_SMMU_V1) {
		u32 reg = iova & ~0xfff;

		writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
	} else {
		u32 reg = iova & ~0xfff;

		writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
		reg = ((u64)iova & ~0xfff) >> 32;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_HI);
	}

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
	phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;

	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS)
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	else
		ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_group *group;
	void (*releasefn)(void *) = NULL;
	int ret;

	smmu = find_smmu_for_device(dev);
	if (!smmu)
		return -ENODEV;

	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		return PTR_ERR(group);
	}

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg) {
			ret = -ENOMEM;
			goto out_put_group;
		}

		cfg->num_streamids = 1;
		/*
		 * Assume Stream ID == Requester ID for now.
		 * We need a way to describe the ID mappings in FDT.
		 */
		pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
				       &cfg->streamids[0]);
		releasefn = __arm_smmu_release_pci_iommudata;
	} else {
		struct arm_smmu_master *master;

		master = find_smmu_master(smmu, dev->of_node);
		if (!master) {
			ret = -ENODEV;
			goto out_put_group;
		}

		cfg = &master->cfg;
	}

	iommu_group_set_iommudata(group, cfg, releasefn);
	ret = iommu_group_add_device(group, dev);

out_put_group:
	iommu_group_put(group);
	return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = domain->priv;

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_init		= arm_smmu_domain_init,
	.domain_destroy		= arm_smmu_domain_destroy,
	.attach_dev		= arm_smmu_attach_dev,
	.detach_dev		= arm_smmu_detach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

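/*
 * Consumers drive the ops table above purely through the generic IOMMU
 * API; a minimal sketch (error handling omitted, assuming a platform
 * device and a single 4k mapping):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */
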
1454 | static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | |
1455 | { | |
1456 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | |
659db6f6 | 1457 | void __iomem *cb_base; |
45ae7cff | 1458 | int i = 0; |
659db6f6 AH |
1459 | u32 reg; |
1460 | ||
3a5df8ff AH |
1461 | /* clear global FSR */ |
1462 | reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); | |
1463 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); | |
45ae7cff WD |
1464 | |
1465 | /* Mark all SMRn as invalid and all S2CRn as bypass */ | |
1466 | for (i = 0; i < smmu->num_mapping_groups; ++i) { | |
3c8766d0 | 1467 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i)); |
2907320d MH |
1468 | writel_relaxed(S2CR_TYPE_BYPASS, |
1469 | gr0_base + ARM_SMMU_GR0_S2CR(i)); | |
45ae7cff WD |
1470 | } |
1471 | ||
659db6f6 AH |
1472 | /* Make sure all context banks are disabled and clear CB_FSR */ |
1473 | for (i = 0; i < smmu->num_context_banks; ++i) { | |
1474 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i); | |
1475 | writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); | |
1476 | writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR); | |
1477 | } | |
1463fe44 | 1478 | |
45ae7cff | 1479 | /* Invalidate the TLB, just in case */ |
45ae7cff WD |
1480 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); |
1481 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); | |
1482 | ||
3a5df8ff | 1483 | reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
659db6f6 | 1484 | |
45ae7cff | 1485 | /* Enable fault reporting */ |
659db6f6 | 1486 | reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE); |
45ae7cff WD |
1487 | |
1488 | /* Disable TLB broadcasting */ | |
659db6f6 | 1489 | reg |= (sCR0_VMIDPNE | sCR0_PTM); |
45ae7cff WD |
1490 | |
1491 | /* Enable client access, but bypass when no mapping is found */ | |
659db6f6 | 1492 | reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG); |
45ae7cff WD |
1493 | |
1494 | /* Disable forced broadcasting */ | |
659db6f6 | 1495 | reg &= ~sCR0_FB; |
45ae7cff WD |
1496 | |
1497 | /* Don't upgrade barriers */ | |
659db6f6 | 1498 | reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); |
45ae7cff WD |
1499 | |
1500 | /* Push the button */ | |
518f7136 | 1501 | __arm_smmu_tlb_sync(smmu); |
3a5df8ff | 1502 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
45ae7cff WD |
1503 | } |
1504 | ||
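/*
 * Editor's note: __arm_smmu_tlb_sync() is defined earlier in this file.
 * A minimal sketch of the pattern it implements, assuming the GR0
 * sTLBGSYNC (0x70) / sTLBGSTATUS (0x74) offsets from the SMMU
 * architecture -- kick a global TLB sync, then poll GSACTIVE until the
 * sync drains (the real helper also bounds the loop with a timeout):
 */
static void example_tlb_sync(void __iomem *gr0_base)
{
	/* Any write to sTLBGSYNC starts a global TLB synchronisation */
	writel_relaxed(0, gr0_base + 0x70);	/* sTLBGSYNC */

	/* GSACTIVE (bit 0) stays set while the sync is in flight */
	while (readl_relaxed(gr0_base + 0x74) & 1)	/* sTLBGSTATUS */
		cpu_relax();
}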
1505 | static int arm_smmu_id_size_to_bits(int size) | |
1506 | { | |
1507 | switch (size) { | |
1508 | case 0: | |
1509 | return 32; | |
1510 | case 1: | |
1511 | return 36; | |
1512 | case 2: | |
1513 | return 40; | |
1514 | case 3: | |
1515 | return 42; | |
1516 | case 4: | |
1517 | return 44; | |
1518 | case 5: | |
1519 | default: | |
1520 | return 48; | |
1521 | } | |
1522 | } | |
1523 | ||
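/*
 * Editor's worked example (hypothetical register value): given
 * ID2 == 0x122 and the ID2_* field definitions earlier in this file
 * (IAS in bits [3:0], OAS in [7:4]), the helper above decodes both
 * address sizes as 40-bit:
 */
static void example_decode_id2(struct arm_smmu_device *smmu)
{
	u32 id = 0x122;	/* hypothetical ID2: UBS=1, OAS=2, IAS=2 */

	/* arm_smmu_id_size_to_bits(2) == 40 for both fields */
	dev_info(smmu->dev, "IAS %d-bit, OAS %d-bit\n",
		 arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK),
		 arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK));
}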
1524 | static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |
1525 | { | |
1526 | unsigned long size; | |
1527 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | |
1528 | u32 id; | |
1529 | ||
1530 | dev_notice(smmu->dev, "probing hardware configuration...\n"); | |
45ae7cff WD |
1531 | dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version); |
1532 | ||
1533 | /* ID0 */ | |
1534 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); | |
4cf740b0 WD |
1535 | |
1536 | /* Restrict available stages based on module parameter */ | |
1537 | if (force_stage == 1) | |
1538 | id &= ~(ID0_S2TS | ID0_NTS); | |
1539 | else if (force_stage == 2) | |
1540 | id &= ~(ID0_S1TS | ID0_NTS); | |
1541 | ||
45ae7cff WD |
1542 | if (id & ID0_S1TS) { |
1543 | smmu->features |= ARM_SMMU_FEAT_TRANS_S1; | |
1544 | dev_notice(smmu->dev, "\tstage 1 translation\n"); | |
1545 | } | |
1546 | ||
1547 | if (id & ID0_S2TS) { | |
1548 | smmu->features |= ARM_SMMU_FEAT_TRANS_S2; | |
1549 | dev_notice(smmu->dev, "\tstage 2 translation\n"); | |
1550 | } | |
1551 | ||
1552 | if (id & ID0_NTS) { | |
1553 | smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED; | |
1554 | dev_notice(smmu->dev, "\tnested translation\n"); | |
1555 | } | |
1556 | ||
1557 | if (!(smmu->features & | |
4cf740b0 | 1558 | (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) { |
45ae7cff WD |
1559 | dev_err(smmu->dev, "\tno translation support!\n"); |
1560 | return -ENODEV; | |
1561 | } | |
1562 | ||
859a732e MH |
1563 | if (smmu->version == ARM_SMMU_V1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) {
1564 | smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; | |
1565 | dev_notice(smmu->dev, "\taddress translation ops\n"); | |
1566 | } | |
1567 | ||
45ae7cff WD |
1568 | if (id & ID0_CTTW) { |
1569 | smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; | |
1570 | dev_notice(smmu->dev, "\tcoherent table walk\n"); | |
1571 | } | |
1572 | ||
1573 | if (id & ID0_SMS) { | |
1574 | u32 smr, sid, mask; | |
1575 | ||
1576 | smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; | |
1577 | smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) & | |
1578 | ID0_NUMSMRG_MASK; | |
1579 | if (smmu->num_mapping_groups == 0) { | |
1580 | dev_err(smmu->dev, | |
1581 | "stream-matching supported, but no SMRs present!\n"); | |
1582 | return -ENODEV; | |
1583 | } | |
1584 | ||
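/*
 * Editor's comment on the probe below: discover which SMR mask/ID bits
 * are actually implemented by writing all-ones to both fields of SMR0
 * and reading back -- bits the hardware does not implement read as zero.
 */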
1585 | smr = SMR_MASK_MASK << SMR_MASK_SHIFT; | |
1586 | smr |= (SMR_ID_MASK << SMR_ID_SHIFT); | |
1587 | writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); | |
1588 | smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); | |
1589 | ||
1590 | mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK; | |
1591 | sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK; | |
1592 | if ((mask & sid) != sid) { | |
1593 | dev_err(smmu->dev, | |
1594 | "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n", | |
1595 | mask, sid); | |
1596 | return -ENODEV; | |
1597 | } | |
1598 | ||
1599 | dev_notice(smmu->dev, | |
1600 | "\tstream matching with %u register groups, mask 0x%x", | |
1601 | smmu->num_mapping_groups, mask); | |
3c8766d0 OH |
1602 | } else { |
1603 | smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) & | |
1604 | ID0_NUMSIDB_MASK; | |
45ae7cff WD |
1605 | } |
1606 | ||
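/*
 * Editor's sketch of the check above, with hypothetical values: if only
 * eight mask bits are implemented (mask == 0xff) while all fifteen ID
 * bits stick (sid == 0x7fff), then (mask & sid) != sid and the probe
 * bails out, since stream matching could never cover the full ID space.
 */
static bool example_smr_capable(u32 mask, u32 sid)
{
	/* true iff every implemented ID bit can be matched by the mask */
	return (mask & sid) == sid;	/* 0xff vs 0x7fff -> false */
}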
1607 | /* ID1 */ | |
1608 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1); | |
c757e852 | 1609 | smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; |
45ae7cff | 1610 | |
c55af7f7 | 1611 | /* Check for size mismatch between SMMU address space and mapped region */
518f7136 | 1612 | size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); |
c757e852 | 1613 | size *= 2 << smmu->pgshift; |
c55af7f7 | 1614 | if (smmu->size != size) |
2907320d MH |
1615 | dev_warn(smmu->dev, |
1616 | "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n", | |
1617 | size, smmu->size); | |
45ae7cff | 1618 | |
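/*
 * Editor's worked example of the computation above, with hypothetical
 * field values: ID1_PAGESIZE clear gives pgshift == 12, so with
 * NUMPAGENDXB == 3 we get size = (1 << (3 + 1)) * (2 << 12) = 128 KiB:
 * sixteen 4 KiB pages of global register space plus the equally sized
 * upper half holding the context banks (ARM_SMMU_CB_BASE points at
 * base + size/2).
 */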
518f7136 | 1619 | smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK; |
45ae7cff WD |
1620 | smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; |
1621 | if (smmu->num_s2_context_banks > smmu->num_context_banks) { | |
1622 | dev_err(smmu->dev, "impossible number of S2 context banks!\n"); | |
1623 | return -ENODEV; | |
1624 | } | |
1625 | dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n", | |
1626 | smmu->num_context_banks, smmu->num_s2_context_banks); | |
1627 | ||
1628 | /* ID2 */ | |
1629 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); | |
1630 | size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); | |
518f7136 | 1631 | smmu->ipa_size = size; |
45ae7cff | 1632 | |
518f7136 | 1633 | /* The output mask is also applied for bypass */ |
45ae7cff | 1634 | size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); |
518f7136 | 1635 | smmu->pa_size = size; |
45ae7cff | 1636 | |
09360403 | 1637 | if (smmu->version == ARM_SMMU_V1) { |
518f7136 WD |
1638 | smmu->va_size = smmu->ipa_size; |
1639 | size = SZ_4K | SZ_2M | SZ_1G; | |
45ae7cff | 1640 | } else { |
45ae7cff | 1641 | size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; |
518f7136 WD |
1642 | smmu->va_size = arm_smmu_id_size_to_bits(size); |
1643 | #ifndef CONFIG_64BIT | |
1644 | smmu->va_size = min(32UL, smmu->va_size); | |
45ae7cff | 1645 | #endif |
518f7136 WD |
1646 | size = 0; |
1647 | if (id & ID2_PTFS_4K) | |
1648 | size |= SZ_4K | SZ_2M | SZ_1G; | |
1649 | if (id & ID2_PTFS_16K) | |
1650 | size |= SZ_16K | SZ_32M; | |
1651 | if (id & ID2_PTFS_64K) | |
1652 | size |= SZ_64K | SZ_512M; | |
45ae7cff WD |
1653 | } |
1654 | ||
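/*
 * Editor's worked example: an SMMUv2 advertising ID2_PTFS_4K and
 * ID2_PTFS_64K ends up with size == SZ_4K | SZ_2M | SZ_1G | SZ_64K |
 * SZ_512M == 0x60211000 -- each granule contributes its page size plus
 * the block sizes its long-descriptor table format can map directly.
 */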
518f7136 WD |
1655 | arm_smmu_ops.pgsize_bitmap &= size; |
1656 | dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size); | |
1657 | ||
28d6007b WD |
1658 | if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) |
1659 | dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", | |
518f7136 | 1660 | smmu->va_size, smmu->ipa_size); |
28d6007b WD |
1661 | |
1662 | if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) | |
1663 | dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", | |
518f7136 | 1664 | smmu->ipa_size, smmu->pa_size); |
28d6007b | 1665 | |
45ae7cff WD |
1666 | return 0; |
1667 | } | |
1668 | ||
09b5269a | 1669 | static const struct of_device_id arm_smmu_of_match[] = { |
09360403 RM |
1670 | { .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 }, |
1671 | { .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 }, | |
1672 | { .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 }, | |
d3aba046 | 1673 | { .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 }, |
09360403 RM |
1674 | { .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 }, |
1675 | { }, | |
1676 | }; | |
1677 | MODULE_DEVICE_TABLE(of, arm_smmu_of_match); | |
1678 | ||
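/*
 * Editor's illustration (hypothetical addresses and stream IDs): a
 * device tree fragment this driver would bind against. Note that
 * "#stream-id-cells" lives on the *master* node and tells the
 * mmu-masters parse in the probe routine below how many stream IDs
 * follow each phandle:
 *
 *	dma0: dma-controller@10000000 {
 *		...
 *		#stream-id-cells = <2>;
 *	};
 *
 *	smmu@ba5e0000 {
 *		compatible = "arm,smmu-v1";
 *		reg = <0xba5e0000 0x10000>;
 *		#global-interrupts = <2>;
 *		interrupts = <0 32 4>, <0 33 4>, <0 34 4>, <0 35 4>;
 *		mmu-masters = <&dma0 0xd01d 0xd01e>;
 *	};
 */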
45ae7cff WD |
1679 | static int arm_smmu_device_dt_probe(struct platform_device *pdev) |
1680 | { | |
09360403 | 1681 | const struct of_device_id *of_id; |
45ae7cff WD |
1682 | struct resource *res; |
1683 | struct arm_smmu_device *smmu; | |
45ae7cff WD |
1684 | struct device *dev = &pdev->dev; |
1685 | struct rb_node *node; | |
1686 | struct of_phandle_args masterspec; | |
1687 | int num_irqs, i, err; | |
1688 | ||
1689 | smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); | |
1690 | if (!smmu) { | |
1691 | dev_err(dev, "failed to allocate arm_smmu_device\n"); | |
1692 | return -ENOMEM; | |
1693 | } | |
1694 | smmu->dev = dev; | |
1695 | ||
09360403 RM |
1696 | of_id = of_match_node(arm_smmu_of_match, dev->of_node); |
1697 | smmu->version = (enum arm_smmu_arch_version)of_id->data; | |
1698 | ||
45ae7cff | 1699 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
8a7f4312 JL |
1700 | smmu->base = devm_ioremap_resource(dev, res); |
1701 | if (IS_ERR(smmu->base)) | |
1702 | return PTR_ERR(smmu->base); | |
45ae7cff | 1703 | smmu->size = resource_size(res); |
45ae7cff WD |
1704 | |
1705 | if (of_property_read_u32(dev->of_node, "#global-interrupts", | |
1706 | &smmu->num_global_irqs)) { | |
1707 | dev_err(dev, "missing #global-interrupts property\n"); | |
1708 | return -ENODEV; | |
1709 | } | |
1710 | ||
1711 | num_irqs = 0; | |
1712 | while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) { | |
1713 | num_irqs++; | |
1714 | if (num_irqs > smmu->num_global_irqs) | |
1715 | smmu->num_context_irqs++; | |
1716 | } | |
1717 | ||
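/*
 * Editor's comment on the loop above: per the binding, the first
 * #global-interrupts entries of "interrupts" are global fault IRQs;
 * every entry past that index is counted as a context-bank IRQ.
 */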
44a08de2 AH |
1718 | if (!smmu->num_context_irqs) { |
1719 | dev_err(dev, "found %d interrupts but expected at least %d\n", | |
1720 | num_irqs, smmu->num_global_irqs + 1); | |
1721 | return -ENODEV; | |
45ae7cff | 1722 | } |
45ae7cff WD |
1723 | |
1724 | smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs, | |
1725 | GFP_KERNEL); | |
1726 | if (!smmu->irqs) { | |
1727 | dev_err(dev, "failed to allocate %d irqs\n", num_irqs); | |
1728 | return -ENOMEM; | |
1729 | } | |
1730 | ||
1731 | for (i = 0; i < num_irqs; ++i) { | |
1732 | int irq = platform_get_irq(pdev, i); | |
2907320d | 1733 | |
45ae7cff WD |
1734 | if (irq < 0) { |
1735 | dev_err(dev, "failed to get irq index %d\n", i); | |
1736 | return -ENODEV; | |
1737 | } | |
1738 | smmu->irqs[i] = irq; | |
1739 | } | |
1740 | ||
3c8766d0 OH |
1741 | err = arm_smmu_device_cfg_probe(smmu); |
1742 | if (err) | |
1743 | return err; | |
1744 | ||
45ae7cff WD |
1745 | i = 0; |
1746 | smmu->masters = RB_ROOT; | |
1747 | while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters", | |
1748 | "#stream-id-cells", i, | |
1749 | &masterspec)) { | |
1750 | err = register_smmu_master(smmu, dev, &masterspec); | |
1751 | if (err) { | |
1752 | dev_err(dev, "failed to add master %s\n", | |
1753 | masterspec.np->name); | |
1754 | goto out_put_masters; | |
1755 | } | |
1756 | ||
1757 | i++; | |
1758 | } | |
1759 | dev_notice(dev, "registered %d master devices\n", i); | |
1760 | ||
3a5df8ff AH |
1761 | parse_driver_options(smmu); |
1762 | ||
09360403 | 1763 | if (smmu->version > ARM_SMMU_V1 && |
45ae7cff WD |
1764 | smmu->num_context_banks != smmu->num_context_irqs) { |
1765 | dev_err(dev, | |
1766 | "found only %d context interrupt(s) but %d required\n", | |
1767 | smmu->num_context_irqs, smmu->num_context_banks); | |
89a23cde | 1768 | err = -ENODEV; |
44680eed | 1769 | goto out_put_masters; |
45ae7cff WD |
1770 | } |
1771 | ||
45ae7cff WD |
1772 | for (i = 0; i < smmu->num_global_irqs; ++i) { |
1773 | err = request_irq(smmu->irqs[i], | |
1774 | arm_smmu_global_fault, | |
1775 | IRQF_SHARED, | |
1776 | "arm-smmu global fault", | |
1777 | smmu); | |
1778 | if (err) { | |
1779 | dev_err(dev, "failed to request global IRQ %d (%u)\n", | |
1780 | i, smmu->irqs[i]); | |
1781 | goto out_free_irqs; | |
1782 | } | |
1783 | } | |
1784 | ||
1785 | INIT_LIST_HEAD(&smmu->list); | |
1786 | spin_lock(&arm_smmu_devices_lock); | |
1787 | list_add(&smmu->list, &arm_smmu_devices); | |
1788 | spin_unlock(&arm_smmu_devices_lock); | |
fd90cecb WD |
1789 | |
1790 | arm_smmu_device_reset(smmu); | |
45ae7cff WD |
1791 | return 0; |
1792 | ||
1793 | out_free_irqs: | |
1794 | while (i--) | |
1795 | free_irq(smmu->irqs[i], smmu); | |
1796 | ||
45ae7cff WD |
1797 | out_put_masters: |
1798 | for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { | |
2907320d MH |
1799 | struct arm_smmu_master *master |
1800 | = container_of(node, struct arm_smmu_master, node); | |
45ae7cff WD |
1801 | of_node_put(master->of_node); |
1802 | } | |
1803 | ||
1804 | return err; | |
1805 | } | |
1806 | ||
1807 | static int arm_smmu_device_remove(struct platform_device *pdev) | |
1808 | { | |
1809 | int i; | |
1810 | struct device *dev = &pdev->dev; | |
1811 | struct arm_smmu_device *curr, *smmu = NULL; | |
1812 | struct rb_node *node; | |
1813 | ||
1814 | spin_lock(&arm_smmu_devices_lock); | |
1815 | list_for_each_entry(curr, &arm_smmu_devices, list) { | |
1816 | if (curr->dev == dev) { | |
1817 | smmu = curr; | |
1818 | list_del(&smmu->list); | |
1819 | break; | |
1820 | } | |
1821 | } | |
1822 | spin_unlock(&arm_smmu_devices_lock); | |
1823 | ||
1824 | if (!smmu) | |
1825 | return -ENODEV; | |
1826 | ||
45ae7cff | 1827 | for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { |
2907320d MH |
1828 | struct arm_smmu_master *master |
1829 | = container_of(node, struct arm_smmu_master, node); | |
45ae7cff WD |
1830 | of_node_put(master->of_node); |
1831 | } | |
1832 | ||
ecfadb6e | 1833 | if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) |
45ae7cff WD |
1834 | dev_err(dev, "removing device with active domains!\n"); |
1835 | ||
1836 | for (i = 0; i < smmu->num_global_irqs; ++i) | |
1837 | free_irq(smmu->irqs[i], smmu); | |
1838 | ||
1839 | /* Turn the thing off */ | |
2907320d | 1840 | writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
45ae7cff WD |
1841 | return 0; |
1842 | } | |
1843 | ||
45ae7cff WD |
1844 | static struct platform_driver arm_smmu_driver = { |
1845 | .driver = { | |
45ae7cff WD |
1846 | .name = "arm-smmu", |
1847 | .of_match_table = of_match_ptr(arm_smmu_of_match), | |
1848 | }, | |
1849 | .probe = arm_smmu_device_dt_probe, | |
1850 | .remove = arm_smmu_device_remove, | |
1851 | }; | |
1852 | ||
1853 | static int __init arm_smmu_init(void) | |
1854 | { | |
0e7d37ad | 1855 | struct device_node *np; |
45ae7cff WD |
1856 | int ret; |
1857 | ||
0e7d37ad TR |
1858 | /* |
1859 | * Play nice with systems that don't have an ARM SMMU by checking that | |
1860 | * an ARM SMMU exists in the system before proceeding with the driver | |
1861 | * and IOMMU bus operation registration. | |
1862 | */ | |
1863 | np = of_find_matching_node(NULL, arm_smmu_of_match); | |
1864 | if (!np) | |
1865 | return 0; | |
1866 | ||
1867 | of_node_put(np); | |
1868 | ||
45ae7cff WD |
1869 | ret = platform_driver_register(&arm_smmu_driver); |
1870 | if (ret) | |
1871 | return ret; | |
1872 | ||
1873 | /* Oh, for a proper bus abstraction */ | |
6614ee77 | 1874 | if (!iommu_present(&platform_bus_type)) |
45ae7cff WD |
1875 | bus_set_iommu(&platform_bus_type, &arm_smmu_ops); |
1876 | ||
d123cf82 | 1877 | #ifdef CONFIG_ARM_AMBA |
6614ee77 | 1878 | if (!iommu_present(&amba_bustype)) |
45ae7cff | 1879 | bus_set_iommu(&amba_bustype, &arm_smmu_ops); |
d123cf82 | 1880 | #endif |
45ae7cff | 1881 | |
a9a1b0b5 WD |
1882 | #ifdef CONFIG_PCI |
1883 | if (!iommu_present(&pci_bus_type)) | |
1884 | bus_set_iommu(&pci_bus_type, &arm_smmu_ops); | |
1885 | #endif | |
1886 | ||
45ae7cff WD |
1887 | return 0; |
1888 | } | |
1889 | ||
1890 | static void __exit arm_smmu_exit(void) | |
1891 | { | |
1892 | return platform_driver_unregister(&arm_smmu_driver); | |
1893 | } | |
1894 | ||
b1950b27 | 1895 | subsys_initcall(arm_smmu_init); |
45ae7cff WD |
1896 | module_exit(arm_smmu_exit); |
1897 | ||
1898 | MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); | |
1899 | MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); | |
1900 | MODULE_LICENSE("GPL v2"); |