/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "io-pgtable.h"
/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL		(3 << 24)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_TTF_AARCH32_64		(3 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)
#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f
#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)
#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0

#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)
#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74
#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT		6
#define STRTAB_BASE_ADDR_MASK		0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc
/* Common MSI config fields */
#define MSI_CFG0_ADDR_SHIFT		2
#define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL
#define MSI_CFG2_SH_SHIFT		4
#define MSI_CFG2_SH_NSH			(0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH			(2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH			(3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT		0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG2_MEMATTR_SHIFT)
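
/*
 * Queue producer/consumer pointers are packed into a single 32-bit word:
 * the queue index lives in the low max_n_shift bits, the wrap bit sits
 * immediately above it, and bit 31 carries the overflow flag. The macros
 * below extract each component.
 */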
#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)

#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_SHIFT		5
#define Q_BASE_ADDR_MASK		0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL
/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8
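
/*
 * With STRTAB_L1_SZ_SHIFT of 20 and one dword (8 bytes) per L1 descriptor,
 * the L1 table holds up to 1 << (20 - 3) = 128k descriptors, and a split
 * of 8 gives 256 STEs per L2 table, matching the comment above.
 */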
#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT	6
#define STRTAB_L1_DESC_L2PTR_MASK	0x3ffffffffffUL

#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT	6
#define STRTAB_STE_0_S1CTXPTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_1_SHCFG_INCOMING	1UL
#define STRTAB_STE_1_SHCFG_SHIFT	44

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT	4
#define STRTAB_STE_3_S2TTB_MASK		0xfffffffffffUL
/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		8
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		10
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT		4
#define CTXDESC_CD_1_TTB0_MASK		0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT		0
/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)
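
/*
 * For example, ARM_SMMU_TCR2CD(tcr, TG0) extracts TCR.TG0 from CPU TCR
 * bits [15:14] (shift 14, mask 0x3) and re-packs it at CD bits [7:6]
 * (shift 6).
 */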
/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8

#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_VA_MASK		~0xfffUL
#define CMDQ_TLBI_1_IPA_MASK		0xfffffffff000UL

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)
/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL
/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_SHIFT		12
#define PRIQ_1_ADDR_MASK		0xfffffffffffffUL
/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100

static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};
struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32		sid;
			u8		size;
			u64		addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32		sid;
			union {
				bool	leaf;
				u8	span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16		asid;
			u16		vmid;
			bool		leaf;
			u64		addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32		sid;
			u32		ssid;
			u16		grpid;
			enum pri_resp	resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
	};
};
struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;
	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};
/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	}				cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_ent {
	bool				valid;

	bool				bypass;	/* Overrides s1/s2 config */
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};
/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
	u32				options;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;
};
/* SMMU private data for an IOMMU group */
struct arm_smmu_group {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_domain		*domain;
	int				num_sids;
	u32				*sids;
	struct arm_smmu_strtab_ent	ste;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};
struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ 0, NULL},
};
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
/* Low-level queue manipulation functions */
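/*
 * The queue is full when the producer and consumer have the same index
 * but differ in the wrap bit; it is empty when both the index and the
 * wrap bit are equal.
 */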
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
	writel(q->cons, q->cons_reg);
}
static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}
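
/*
 * Compare the consumer pointer with a target value, taking the wrap
 * bit into account: on the same lap a plain index comparison suffices,
 * while differing wrap bits invert the test.
 */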
static bool __queue_cons_before(struct arm_smmu_queue *q, u32 until)
{
	if (Q_WRP(q, q->cons) == Q_WRP(q, until))
		return Q_IDX(q, q->cons) < Q_IDX(q, until);

	return Q_IDX(q, q->cons) >= Q_IDX(q, until);
}

static int queue_poll_cons(struct arm_smmu_queue *q, u32 until, bool wfe)
{
	ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), __queue_cons_before(q, until)) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(1);
		}
	}

	return 0;
}
static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}
/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		cerror_str[idx]);

	switch (idx) {
	case CMDQ_ERR_CERROR_ILL_IDX:
		break;
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u32 until;
	u64 cmd[CMDQ_ENT_DWORDS];
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_queue *q = &smmu->cmdq.q;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock(&smmu->cmdq.lock);
	while (until = q->prod + 1, queue_insert_raw(q, cmd) == -ENOSPC) {
		/*
		 * Keep the queue locked, otherwise the producer could wrap
		 * twice and we could see a future consumer pointer that looks
		 * like it's behind us.
		 */
		if (queue_poll_cons(q, until, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}

	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, until, wfe))
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
	spin_unlock(&smmu->cmdq.lock);
}
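
/*
 * A typical caller builds an entry on the stack and hands it off, e.g.:
 *
 *	struct arm_smmu_cmdq_ent cmd = { .opcode = CMDQ_OP_TLBI_NH_ASID };
 *
 *	cmd.tlbi.asid = asid;
 *	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
 *
 * followed by a CMDQ_OP_CMD_SYNC command to wait for completion (see
 * arm_smmu_tlb_inv_context() and __arm_smmu_tlb_sync() below).
 */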
/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}
static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;
	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}
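
/*
 * Context descriptor layout as written above: dword 0 holds the repacked
 * TCR fields, the ASID and the validity/attribute bits, dword 1 holds the
 * TTB0 pointer and dword 3 the MAIR. Dword 2 (TTB1) is left zeroed, since
 * only TTBR0 is used.
 */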
/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma &
	       STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

	*dst = cpu_to_le64(val);
}
static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}
static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass  (init)
	 * 2. Bypass -> translation (attach)
	 * 3. Translation -> bypass (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing Config, as we're going to rewrite it */
	val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);

	if (ste->valid)
		val |= STRTAB_STE_0_V;
	else
		val &= ~STRTAB_STE_0_V;

	if (ste->bypass) {
		val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
				      : STRTAB_STE_0_CFG_BYPASS;
		dst[0] = cpu_to_le64(val);
		dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
				     << STRTAB_STE_1_SHCFG_SHIFT);
		dst[2] = 0; /* Nuke the VMID */
		if (ste_live)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
			 STRTAB_STE_1_S1STALLD |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
			<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;
	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	dst[0] = cpu_to_le64(val);
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = {
		.valid	= true,
		.bypass	= true,
	};

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}
static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL | __GFP_ZERO);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}
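
/*
 * Each lazily-allocated L2 table above is
 * 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3) bytes, i.e. 256
 * STEs of 64 bytes each (16KB) with the default split of 8.
 */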
/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	while (!queue_remove_raw(q, evt)) {
		u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

		dev_info(smmu->dev, "event 0x%02x received:\n", id);
		for (i = 0; i < ARRAY_SIZE(evt); ++i)
			dev_info(smmu->dev, "\t0x%016llx\n",
				 (unsigned long long)evt[i]);
	}

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_evtq_handler(int irq, void *dev)
{
	irqreturn_t ret = IRQ_WAKE_THREAD;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;

	/*
	 * Not much we can do on overflow, so scream and pretend we're
	 * trying harder.
	 */
	if (queue_sync_prod(q) == -EOVERFLOW)
		dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	else if (queue_empty(q))
		ret = IRQ_NONE;

	return ret;
}
static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	while (!queue_remove_raw(q, evt)) {
		u32 sid, ssid;
		u16 grpid;
		bool ssv, last;

		sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
		ssv = evt[0] & PRIQ_0_SSID_V;
		ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
		last = evt[0] & PRIQ_0_PRG_LAST;
		grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

		dev_info(smmu->dev, "unexpected PRI request received:\n");
		dev_info(smmu->dev,
			 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
			 sid, ssid, grpid, last ? "L" : "",
			 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
			 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
			 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
			 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
			 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

		if (last) {
			struct arm_smmu_cmdq_ent cmd = {
				.opcode			= CMDQ_OP_PRI_RESP,
				.substream_valid	= ssv,
				.pri			= {
					.sid	= sid,
					.ssid	= ssid,
					.grpid	= grpid,
					.resp	= PRI_RESP_DENY,
				},
			};

			arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		}
	}

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_priq_handler(int irq, void *dev)
{
	irqreturn_t ret = IRQ_WAKE_THREAD;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;

	/* PRIQ overflow indicates a programming error */
	if (queue_sync_prod(q) == -EOVERFLOW)
		dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	else if (queue_empty(q))
		ret = IRQ_NONE;

	return ret;
}
static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
	/* We don't actually use CMD_SYNC interrupts for anything */
	return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	gerror ^= gerrorn;
	if (!(gerror & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 gerror);

	if (gerror & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (gerror & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (gerror & GERROR_MSI_PRIQ_ABT_ERR) {
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
		arm_smmu_priq_handler(irq, smmu->dev);
	}

	if (gerror & GERROR_MSI_EVTQ_ABT_ERR) {
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
		arm_smmu_evtq_handler(irq, smmu->dev);
	}

	if (gerror & GERROR_MSI_CMDQ_ABT_ERR) {
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
		arm_smmu_cmdq_sync_handler(irq, smmu->dev);
	}

	if (gerror & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (gerror & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (gerror & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}
/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	struct arm_smmu_cmdq_ent cmd;

	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
/* IOMMU API */
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	return &smmu_domain->domain;
}
static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dmam_free_coherent(smmu_domain->smmu->dev,
					   CTXDESC_CD_DWORDS << 3,
					   cfg->cdptr,
					   cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}
static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	int asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (IS_ERR_VALUE(asid))
		return asid;

	cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma,
					 GFP_KERNEL | __GFP_ZERO);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid	= (u16)asid;
	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}
static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (IS_ERR_VALUE(vmid))
		return vmid;

	cfg->vmid	= (u16)vmid;
	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
	int ret;
	unsigned long ias, oas;
	enum io_pgtable_fmt fmt;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	int (*finalise_stage_fn)(struct arm_smmu_domain *,
				 struct io_pgtable_cfg *);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* Restrict the stage to what we can actually support */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		ias = VA_BITS;
		oas = smmu->ias;
		fmt = ARM_64_LPAE_S1;
		finalise_stage_fn = arm_smmu_domain_finalise_s1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		ias = smmu->ias;
		oas = smmu->oas;
		fmt = ARM_64_LPAE_S2;
		finalise_stage_fn = arm_smmu_domain_finalise_s2;
		break;
	default:
		return -EINVAL;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	smmu_domain->pgtbl_ops = pgtbl_ops;

	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
	if (IS_ERR_VALUE(ret))
		free_io_pgtable_ops(pgtbl_ops);

	return ret;
}
static struct arm_smmu_group *arm_smmu_group_get(struct device *dev)
{
	struct iommu_group *group;
	struct arm_smmu_group *smmu_group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	smmu_group = iommu_group_get_iommudata(group);
	iommu_group_put(group);
	return smmu_group;
}
static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	__le64 *step;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		struct arm_smmu_strtab_l1_desc *l1_desc;
		int idx;

		/* Two-level walk */
		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
		l1_desc = &cfg->l1_desc[idx];
		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
		step = &l1_desc->l2ptr[idx];
	} else {
		/* Simple linear lookup */
		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
	}

	return step;
}
static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group)
{
	int i;
	struct arm_smmu_domain *smmu_domain = smmu_group->domain;
	struct arm_smmu_strtab_ent *ste = &smmu_group->ste;
	struct arm_smmu_device *smmu = smmu_group->smmu;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ste->s1_cfg = &smmu_domain->s1_cfg;
		ste->s2_cfg = NULL;
		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
	} else {
		ste->s1_cfg = NULL;
		ste->s2_cfg = &smmu_domain->s2_cfg;
	}

	for (i = 0; i < smmu_group->num_sids; ++i) {
		u32 sid = smmu_group->sids[i];
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		arm_smmu_write_strtab_ent(smmu, sid, step, ste);
	}

	return 0;
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);

	if (!smmu_group)
		return -ENOENT;

	/* Already attached to a different domain? */
	if (smmu_group->domain && smmu_group->domain != smmu_domain)
		return -EEXIST;

	smmu = smmu_group->smmu;
	mutex_lock(&smmu_domain->init_mutex);

	if (!smmu_domain->smmu) {
		smmu_domain->smmu = smmu;
		ret = arm_smmu_domain_finalise(domain);
		if (ret) {
			smmu_domain->smmu = NULL;
			goto out_unlock;
		}
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s (upstream of %s)\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		ret = -ENXIO;
		goto out_unlock;
	}

	/* Group already attached to this domain? */
	if (smmu_group->domain)
		goto out_unlock;

	smmu_group->domain	= smmu_domain;
	smmu_group->ste.bypass	= false;

	ret = arm_smmu_install_ste_for_group(smmu_group);
	if (IS_ERR_VALUE(ret))
		smmu_group->domain = NULL;

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);

	BUG_ON(!smmu_domain);
	BUG_ON(!smmu_group);

	mutex_lock(&smmu_domain->init_mutex);
	BUG_ON(smmu_group->domain != smmu_domain);

	smmu_group->ste.bypass = true;
	if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
		dev_warn(dev, "failed to install bypass STE\n");

	smmu_group->domain = NULL;
	mutex_unlock(&smmu_domain->init_mutex);
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}
static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *sidp)
{
	*(u32 *)sidp = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev)
{
	struct device_node *of_node;
	struct platform_device *smmu_pdev;
	struct arm_smmu_device *smmu = NULL;
	struct pci_bus *bus = pdev->bus;

	/* Walk up to the root bus */
	while (!pci_is_root_bus(bus))
		bus = bus->parent;

	/* Follow the "iommus" phandle from the host controller */
	of_node = of_parse_phandle(bus->bridge->parent->of_node, "iommus", 0);
	if (!of_node)
		return NULL;

	/* See if we can find an SMMU corresponding to the phandle */
	smmu_pdev = of_find_device_by_node(of_node);
	if (smmu_pdev)
		smmu = platform_get_drvdata(smmu_pdev);

	of_node_put(of_node);
	return smmu;
}
static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}
static int arm_smmu_add_device(struct device *dev)
{
	int i, ret;
	u32 sid, *sids;
	struct pci_dev *pdev;
	struct iommu_group *group;
	struct arm_smmu_group *smmu_group;
	struct arm_smmu_device *smmu;

	/* We only support PCI, for now */
	if (!dev_is_pci(dev))
		return -ENODEV;

	pdev = to_pci_dev(dev);
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	smmu_group = iommu_group_get_iommudata(group);
	if (!smmu_group) {
		smmu = arm_smmu_get_for_pci_dev(pdev);
		if (!smmu) {
			ret = -ENOENT;
			goto out_remove_dev;
		}

		smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL);
		if (!smmu_group) {
			ret = -ENOMEM;
			goto out_remove_dev;
		}

		smmu_group->ste.valid	= true;
		smmu_group->smmu	= smmu;
		iommu_group_set_iommudata(group, smmu_group,
					  __arm_smmu_release_pci_iommudata);
	} else {
		smmu = smmu_group->smmu;
	}

	/* Assume SID == RID until firmware tells us otherwise */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < smmu_group->num_sids; ++i) {
		/* If we already know about this SID, then we're done */
		if (smmu_group->sids[i] == sid)
			goto out_put_group;
	}

	/* Check the SID is in range of the SMMU and our stream table */
	if (!arm_smmu_sid_in_range(smmu, sid)) {
		ret = -ERANGE;
		goto out_remove_dev;
	}

	/* Ensure l2 strtab is initialised */
	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		ret = arm_smmu_init_l2_strtab(smmu, sid);
		if (ret)
			goto out_remove_dev;
	}

	/* Resize the SID array for the group */
	smmu_group->num_sids++;
	sids = krealloc(smmu_group->sids, smmu_group->num_sids * sizeof(*sids),
			GFP_KERNEL);
	if (!sids) {
		smmu_group->num_sids--;
		ret = -ENOMEM;
		goto out_remove_dev;
	}

	/* Add the new SID */
	sids[smmu_group->num_sids - 1] = sid;
	smmu_group->sids = sids;

out_put_group:
	iommu_group_put(group);
	return 0;

out_remove_dev:
	iommu_group_remove_device(dev);
	iommu_group_put(group);
	return ret;
}
static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.detach_dev		= arm_smmu_detach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= pci_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords)
{
	size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;

	q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
	if (!q->base) {
		dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
			qsz);
		return -ENOMEM;
	}

	q->prod_reg	= smmu->base + prod_off;
	q->cons_reg	= smmu->base + cons_off;
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
	q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
		     << Q_BASE_LOG2SIZE_SHIFT;

	q->prod = q->cons = 0;
	return 0;
}
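
/*
 * The Q_BASE value built above packs the DMA address of the ring (bits
 * [48:5], per Q_BASE_ADDR_MASK/SHIFT) together with the read-allocate
 * hint (bit 62) and the log2 queue size in the low five bits.
 */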
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
}
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}
static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/*
	 * If we can resolve everything with a single L2 table, then we
	 * just need a single L1 descriptor. Otherwise, calculate the L1
	 * size, capped to the SIDSIZE.
	 */
	if (smmu->sid_bits < STRTAB_SPLIT) {
		size = 0;
	} else {
		size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
		size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	}
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);

	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg  = STRTAB_BASE_CFG_FMT_2LVL;
	reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
		<< STRTAB_BASE_CFG_SPLIT_SHIFT;
	cfg->strtab_base_cfg = reg;

	return arm_smmu_init_l1_strtab(smmu);
}
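
/*
 * Worked example for the sizing above: with sid_bits = 16,
 * size = min(20 - 3, 16 - 8) = 8, so 256 L1 descriptors are allocated
 * and the table covers the full 8 + 8 = 16 bits of SID with no warning.
 */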
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;

	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg  = STRTAB_BASE_CFG_FMT_LINEAR;
	reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}
static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg  = smmu->strtab_cfg.strtab_dma &
	       STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}

static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	return arm_smmu_init_strtab(smmu);
}
static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}
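
/*
 * Register updates such as CR0 only take effect once the SMMU reflects
 * the new value in the corresponding ACK register (e.g. CR0ACK), so
 * each update is a write followed by a bounded poll of the ACK register.
 */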
static void arm_smmu_free_msis(void *data)
{
	struct device *dev = data;
	platform_msi_domain_free_irqs(dev);
}

static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;

	writeq_relaxed(doorbell, smmu->base + cfg[0]);
	writel_relaxed(msg->data, smmu->base + cfg[1]);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}
static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
	struct msi_desc *desc;
	int ret, nvec = ARM_SMMU_MAX_MSIS;
	struct device *dev = smmu->dev;

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
	else
		nvec--;

	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
		return;

	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	for_each_msi_entry(desc, dev) {
		switch (desc->platform.msi_index) {
		case EVTQ_MSI_INDEX:
			smmu->evtq.q.irq = desc->irq;
			break;
		case GERROR_MSI_INDEX:
			smmu->gerr_irq = desc->irq;
			break;
		case PRIQ_MSI_INDEX:
			smmu->priq.q.irq = desc->irq;
			break;
		default:	/* Unknown */
			continue;
		}
	}

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, arm_smmu_free_msis, dev);
}
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;
	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	arm_smmu_setup_msis(smmu);

	/* Request interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq,
						arm_smmu_evtq_handler,
						arm_smmu_evtq_thread,
						0, "arm-smmu-v3-evtq", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	}

	irq = smmu->cmdq.q.irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq,
				       arm_smmu_cmdq_sync_handler, 0,
				       "arm-smmu-v3-cmdq-sync", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq,
							arm_smmu_priq_handler,
							arm_smmu_priq_thread,
							0, "arm-smmu-v3-priq",
							smmu);
			if (IS_ERR_VALUE(ret))
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
			else
				irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
		}
	}

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}
static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}
static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN)
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
	      (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (miscellaneous configuration) */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
	writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       smmu->base + ARM_SMMU_PRIQ_PROD);
		writel_relaxed(smmu->priq.q.cons,
			       smmu->base + ARM_SMMU_PRIQ_CONS);

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	/* Enable the SMMU interface */
	enables |= CR0_SMMUEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}

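/*
 * Probe the hardware capabilities advertised in the IDR registers and
 * reduce the driver's feature set to what both the kernel configuration
 * and this particular implementation can support.
 */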
static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent;
	unsigned long pgsize_bitmap = 0;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The dma-coherent property is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	coherent = of_dma_is_coherent(smmu->dev->of_node);
	if (coherent)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
			 coherent ? "true" : "false");

	if (reg & IDR0_STALL_MODEL)
		smmu->features |= ARM_SMMU_FEAT_STALLS;

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
				       reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
				       reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
	smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
				       reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);

	/* SID/SSID sizes */
	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
				& IDR5_STALL_MAX_MASK;

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
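
	/*
	 * arm_smmu_ops is shared by all SMMUv3 instances, so masking its
	 * pgsize_bitmap restricts the IOMMU API to the page sizes that
	 * every probed device supports.
	 */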
	arm_smmu_ops.pgsize_bitmap &= pgsize_bitmap;

	/* Output address size */
	switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	default:
		dev_info(smmu->dev,
			 "unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
}

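/*
 * Device-tree probe: map the MMIO register region (SMMUv3 exposes two
 * 64K register pages, hence the SZ_128K sanity check below), pick up
 * the optional named interrupt lines, then probe the hardware, allocate
 * the in-memory structures and reset the device.
 */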
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) + 1 < SZ_128K) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */
	irq = platform_get_irq_byname(pdev, "eventq");
	if (irq > 0)
		smmu->evtq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "priq");
	if (irq > 0)
		smmu->priq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "cmdq-sync");
	if (irq > 0)
		smmu->cmdq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "gerror");
	if (irq > 0)
		smmu->gerr_irq = irq;

	parse_driver_options(smmu);

	/* Probe the h/w */
	ret = arm_smmu_device_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	return arm_smmu_device_reset(smmu);
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);
	return 0;
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

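/*
 * An illustrative (not authoritative) device-tree node for this driver;
 * the binding document is the definitive reference. The interrupt-names
 * below are the ones parsed by arm_smmu_device_dt_probe(); the GIC_SPI
 * numbers are placeholders:
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		reg = <0x0 0x2b400000 0x0 0x20000>;
 *		interrupts = <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>;
 *		interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
 *		#iommu-cells = <0>;
 *	};
 */
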
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	return bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}

static void __exit arm_smmu_exit(void)
{
	platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");