/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "io-pgtable.h"
/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL		(3 << 24)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)
#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f
#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)
#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24
#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0
#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54
#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0x1fd	/* All of the error bits above */

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74
#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT		6
#define STRTAB_BASE_ADDR_MASK		0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)
#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc
/* Common MSI config fields */
#define MSI_CFG0_SH_SHIFT		60
#define MSI_CFG0_SH_NSH			(0UL << MSI_CFG0_SH_SHIFT)
#define MSI_CFG0_SH_OSH			(2UL << MSI_CFG0_SH_SHIFT)
#define MSI_CFG0_SH_ISH			(3UL << MSI_CFG0_SH_SHIFT)
#define MSI_CFG0_MEMATTR_SHIFT		56
#define MSI_CFG0_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG0_MEMATTR_SHIFT)
#define MSI_CFG0_ADDR_SHIFT		2
#define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL
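
/*
 * A queue's prod/cons value encodes three things: the entry index in
 * the low max_n_shift bits, a wrap bit immediately above it, and (for
 * the SMMU-owned pointer) an overflow flag in bit 31.
 */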
#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)

#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_SHIFT		5
#define Q_BASE_ADDR_MASK		0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL
/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		16
#define STRTAB_SPLIT			8

#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT	6
#define STRTAB_L1_DESC_L2PTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT	6
#define STRTAB_STE_0_S1CTXPTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT	4
#define STRTAB_STE_3_S2TTB_MASK		0xfffffffffffUL
/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		24
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		26
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT		4
#define CTXDESC_CD_1_TTB0_MASK		0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT		0
/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)
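
/*
 * For example, ARM_SMMU_TCR2CD(tcr, T0SZ) extracts T0SZ from a CPU TCR
 * value using ARM64_TCR_T0SZ_{SHIFT,MASK} and re-packs it at
 * CTXDESC_CD_0_TCR_T0SZ_SHIFT within CD dword 0.
 */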
/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8
#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_ADDR_MASK		~0xfffUL

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)
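
/*
 * Each command is a pair of 64-bit words: the opcode lives in the low
 * byte of dword 0 and the remaining fields are packed around it using
 * the shifts/masks above.
 */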
/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL
/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_OF			(1UL << 57)
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_SHIFT		12
#define PRIQ_1_ADDR_MASK		0xfffffffffffffUL
/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100
static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32		sid;
			u8		size;
			u64		addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32		sid;
			bool		leaf;
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16		asid;
			u16		vmid;
			bool		leaf;
			u64		addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32		sid;
			u32		ssid;
			u16		grpid;
			enum pri_resp	resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
	};
};
struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;
	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};
/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	}				cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_ent {
	bool				valid;

	bool				bypass;	/* Overrides s1/s2 config */
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};
/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
	u32				features;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;
	struct list_head		list;
};
/* SMMU private data for an IOMMU group */
struct arm_smmu_group {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_domain		*domain;
	int				num_sids;
	u32				*sids;
	struct arm_smmu_strtab_ent	ste;
};
/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};
/* Our list of SMMU instances */
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}
/* Low-level queue manipulation functions */
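/*
 * Identical prod/cons indices mean the queue is empty if the wrap bits
 * also match, and full if they differ.
 */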
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}
static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
	writel(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}
static bool __queue_cons_before(struct arm_smmu_queue *q, u32 until)
{
	if (Q_WRP(q, q->cons) == Q_WRP(q, until))
		return Q_IDX(q, q->cons) < Q_IDX(q, until);

	return Q_IDX(q, q->cons) >= Q_IDX(q, until);
}

static int queue_poll_cons(struct arm_smmu_queue *q, u32 until, bool wfe)
{
	ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), __queue_cons_before(q, until)) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(1);
		}
	}

	return 0;
}
static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}
/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_ADDR_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		cerror_str[idx]);

	switch (idx) {
	case CMDQ_ERR_CERROR_ILL_IDX:
		break;
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, idx), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(cmd, Q_ENT(q, idx), q->ent_dwords);
}
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u32 until;
	u64 cmd[CMDQ_ENT_DWORDS];
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_queue *q = &smmu->cmdq.q;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock(&smmu->cmdq.lock);
	while (until = q->prod + 1, queue_insert_raw(q, cmd) == -ENOSPC) {
		/*
		 * Keep the queue locked, otherwise the producer could wrap
		 * twice and we could see a future consumer pointer that looks
		 * like it's behind us.
		 */
		if (queue_poll_cons(q, until, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}

	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, until, wfe))
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
	spin_unlock(&smmu->cmdq.lock);
}
/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}
static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;
	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}
/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma &
	       STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

	*dst = cpu_to_le64(val);
}
static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}
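
/*
 * An STE is eight 64-bit words: dword 0 holds V, Config and the
 * S1ContextPtr, dword 1 the stage-1 attributes, and dwords 2/3 the
 * stage-2 VMID/VTCR and VTTBR fields programmed below.
 */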
static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass  (init)
	 * 2. Bypass -> translation (attach)
	 * 3. Translation -> bypass (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing Config, as we're going to rewrite it */
	val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);

	if (ste->valid)
		val |= STRTAB_STE_0_V;
	else
		val &= ~STRTAB_STE_0_V;

	if (ste->bypass) {
		val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
				      : STRTAB_STE_0_CFG_BYPASS;
		dst[0] = cpu_to_le64(val);
		dst[2] = 0; /* Nuke the VMID */
		if (ste_live)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
			 STRTAB_STE_1_S1STALLD |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
		        << STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;
	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	dst[0] = cpu_to_le64(val);
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = {
		.valid	= true,
		.bypass	= true,
	};

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}
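
/*
 * L2 stream tables are allocated lazily: the first SID seen in a given
 * 1 << STRTAB_SPLIT range allocates the table for that range and points
 * the corresponding L1 descriptor at it.
 */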
static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}
/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	while (!queue_remove_raw(q, evt)) {
		u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

		dev_info(smmu->dev, "event 0x%02x received:\n", id);
		for (i = 0; i < ARRAY_SIZE(evt); ++i)
			dev_info(smmu->dev, "\t0x%016llx\n",
				 (unsigned long long)evt[i]);
	}

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_evtq_handler(int irq, void *dev)
{
	irqreturn_t ret = IRQ_WAKE_THREAD;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;

	/*
	 * Not much we can do on overflow, so scream and pretend we're
	 * trying harder.
	 */
	if (queue_sync_prod(q) == -EOVERFLOW)
		dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	else if (queue_empty(q))
		ret = IRQ_NONE;

	return ret;
}
static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	while (!queue_remove_raw(q, evt)) {
		u32 sid, ssid;
		u16 grpid;
		bool ssv, last;

		sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
		ssv = evt[0] & PRIQ_0_SSID_V;
		ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
		last = evt[0] & PRIQ_0_PRG_LAST;
		grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

		dev_info(smmu->dev, "unexpected PRI request received:\n");
		dev_info(smmu->dev,
			 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
			 sid, ssid, grpid, last ? "L" : "",
			 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
			 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
			 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
			 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
			 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

		if (last) {
			struct arm_smmu_cmdq_ent cmd = {
				.opcode			= CMDQ_OP_PRI_RESP,
				.substream_valid	= ssv,
				.pri			= {
					.sid	= sid,
					.ssid	= ssid,
					.grpid	= grpid,
					.resp	= PRI_RESP_DENY,
				},
			};

			arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		}
	}

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_priq_handler(int irq, void *dev)
{
	irqreturn_t ret = IRQ_WAKE_THREAD;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;

	/* PRIQ overflow indicates a programming error */
	if (queue_sync_prod(q) == -EOVERFLOW)
		dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	else if (queue_empty(q))
		ret = IRQ_NONE;

	return ret;
}
static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
	/* We don't actually use CMD_SYNC interrupts for anything */
	return IRQ_HANDLED;
}
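
/*
 * Global errors are signalled via the GERROR/GERRORN register pair:
 * bits that differ between the two indicate active errors, and an
 * error is acknowledged by making the GERRORN bit match GERROR again.
 */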
static int arm_smmu_device_disable(struct arm_smmu_device *smmu);

static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn, active;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	active = gerror ^ gerrorn;
	if (!(active & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 active);

	if (active & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (active & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (active & GERROR_MSI_PRIQ_ABT_ERR) {
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
		arm_smmu_priq_handler(irq, smmu);
	}

	if (active & GERROR_MSI_EVTQ_ABT_ERR) {
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
		arm_smmu_evtq_handler(irq, smmu);
	}

	if (active & GERROR_MSI_CMDQ_ABT_ERR) {
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
		arm_smmu_cmdq_sync_handler(irq, smmu);
	}

	if (active & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (active & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (active & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}
/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	struct arm_smmu_cmdq_ent cmd;

	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}
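
/*
 * Page-table updates must be visible to the SMMU walker: a DSB is
 * enough on coherent SMMUs, otherwise the updated region is pushed
 * out to memory with a streaming DMA map/unmap pair.
 */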
static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;

	if (smmu->features & ARM_SMMU_FEAT_COHERENCY) {
		dsb(ishst);
	} else {
		dma_addr_t dma_addr;
		struct device *dev = smmu->dev;

		dma_addr = dma_map_page(dev, virt_to_page(addr), offset, size,
					DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr))
			dev_err(dev, "failed to flush pgtable at %p\n", addr);
		else
			dma_unmap_page(dev, dma_addr, size, DMA_TO_DEVICE);
	}
}
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.flush_pgtable	= arm_smmu_flush_pgtable,
};

/* IOMMU API */
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	return &smmu_domain->domain;
}
static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu_domain->pgtbl_ops)
		free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dma_free_coherent(smmu_domain->smmu->dev,
					  CTXDESC_CD_DWORDS << 3,
					  cfg->cdptr,
					  cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}
static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	int asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (IS_ERR_VALUE(asid))
		return asid;

	cfg->cdptr = dma_zalloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma, GFP_KERNEL);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid	= asid;
	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}
static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (IS_ERR_VALUE(vmid))
		return vmid;

	cfg->vmid	= vmid;
	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
	int ret;
	unsigned long ias, oas;
	enum io_pgtable_fmt fmt;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	int (*finalise_stage_fn)(struct arm_smmu_domain *,
				 struct io_pgtable_cfg *);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* Restrict the stage to what we can actually support */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		ias = VA_BITS;
		oas = smmu->ias;
		fmt = ARM_64_LPAE_S1;
		finalise_stage_fn = arm_smmu_domain_finalise_s1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		ias = smmu->ias;
		oas = smmu->oas;
		fmt = ARM_64_LPAE_S2;
		finalise_stage_fn = arm_smmu_domain_finalise_s2;
		break;
	default:
		return -EINVAL;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
	};

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	smmu_domain->pgtbl_ops = pgtbl_ops;

	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
	if (IS_ERR_VALUE(ret))
		free_io_pgtable_ops(pgtbl_ops);

	return ret;
}
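
/*
 * Note that arm_smmu_ops.pgsize_bitmap is shared by all SMMU instances,
 * so the advertised page sizes are restricted to whatever the most
 * recently finalised domain could support.
 */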
static struct arm_smmu_group *arm_smmu_group_get(struct device *dev)
{
	struct iommu_group *group;
	struct arm_smmu_group *smmu_group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	smmu_group = iommu_group_get_iommudata(group);
	iommu_group_put(group);
	return smmu_group;
}
static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	__le64 *step;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		struct arm_smmu_strtab_l1_desc *l1_desc;
		int idx;

		/* Two-level walk */
		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
		l1_desc = &cfg->l1_desc[idx];
		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
		step = &l1_desc->l2ptr[idx];
	} else {
		/* Simple linear lookup */
		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
	}

	return step;
}
static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group)
{
	int i;
	struct arm_smmu_domain *smmu_domain = smmu_group->domain;
	struct arm_smmu_strtab_ent *ste = &smmu_group->ste;
	struct arm_smmu_device *smmu = smmu_group->smmu;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ste->s1_cfg = &smmu_domain->s1_cfg;
		ste->s2_cfg = NULL;
		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
	} else {
		ste->s1_cfg = NULL;
		ste->s2_cfg = &smmu_domain->s2_cfg;
	}

	for (i = 0; i < smmu_group->num_sids; ++i) {
		u32 sid = smmu_group->sids[i];
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		arm_smmu_write_strtab_ent(smmu, sid, step, ste);
	}

	return 0;
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);

	if (!smmu_group)
		return -ENOENT;

	/* Already attached to a different domain? */
	if (smmu_group->domain && smmu_group->domain != smmu_domain)
		return -EEXIST;

	smmu = smmu_group->smmu;
	mutex_lock(&smmu_domain->init_mutex);

	if (!smmu_domain->smmu) {
		smmu_domain->smmu = smmu;
		ret = arm_smmu_domain_finalise(domain);
		if (ret) {
			smmu_domain->smmu = NULL;
			goto out_unlock;
		}
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s (upstream of %s)\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		ret = -ENXIO;
		goto out_unlock;
	}

	/* Group already attached to this domain? */
	if (smmu_group->domain)
		goto out_unlock;

	smmu_group->domain	= smmu_domain;
	smmu_group->ste.bypass	= false;

	ret = arm_smmu_install_ste_for_group(smmu_group);
	if (IS_ERR_VALUE(ret))
		smmu_group->domain = NULL;

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);

	BUG_ON(!smmu_domain);
	BUG_ON(!smmu_group);

	mutex_lock(&smmu_domain->init_mutex);
	BUG_ON(smmu_group->domain != smmu_domain);

	smmu_group->ste.bypass = true;
	if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
		dev_warn(dev, "failed to install bypass STE\n");

	smmu_group->domain = NULL;
	mutex_unlock(&smmu_domain->init_mutex);
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *sidp)
{
	*(u32 *)sidp = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}
static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev)
{
	struct device_node *of_node;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct pci_bus *bus = pdev->bus;

	/* Walk up to the root bus */
	while (!pci_is_root_bus(bus))
		bus = bus->parent;

	/* Follow the "iommus" phandle from the host controller */
	of_node = of_parse_phandle(bus->bridge->parent->of_node, "iommus", 0);
	if (!of_node)
		return NULL;

	/* See if we can find an SMMU corresponding to the phandle */
	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev->of_node == of_node) {
			smmu = curr;
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);
	of_node_put(of_node);
	return smmu;
}
static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}
static int arm_smmu_add_device(struct device *dev)
{
	int i, ret;
	u32 sid, *sids;
	struct pci_dev *pdev;
	struct iommu_group *group;
	struct arm_smmu_group *smmu_group;
	struct arm_smmu_device *smmu;

	/* We only support PCI, for now */
	if (!dev_is_pci(dev))
		return -ENODEV;

	pdev = to_pci_dev(dev);
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	smmu_group = iommu_group_get_iommudata(group);
	if (!smmu_group) {
		smmu = arm_smmu_get_for_pci_dev(pdev);
		if (!smmu) {
			ret = -ENOENT;
			goto out_put_group;
		}

		smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL);
		if (!smmu_group) {
			ret = -ENOMEM;
			goto out_put_group;
		}

		smmu_group->ste.valid	= true;
		smmu_group->smmu	= smmu;
		iommu_group_set_iommudata(group, smmu_group,
					  __arm_smmu_release_pci_iommudata);
	} else {
		smmu = smmu_group->smmu;
	}

	/* Assume SID == RID until firmware tells us otherwise */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < smmu_group->num_sids; ++i) {
		/* If we already know about this SID, then we're done */
		if (smmu_group->sids[i] == sid)
			return 0;
	}

	/* Check the SID is in range of the SMMU and our stream table */
	if (!arm_smmu_sid_in_range(smmu, sid)) {
		ret = -ERANGE;
		goto out_put_group;
	}

	/* Ensure l2 strtab is initialised */
	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		ret = arm_smmu_init_l2_strtab(smmu, sid);
		if (ret)
			goto out_put_group;
	}

	/* Resize the SID array for the group */
	smmu_group->num_sids++;
	sids = krealloc(smmu_group->sids, smmu_group->num_sids * sizeof(*sids),
			GFP_KERNEL);
	if (!sids) {
		smmu_group->num_sids--;
		ret = -ENOMEM;
		goto out_put_group;
	}

	/* Add the new SID */
	sids[smmu_group->num_sids - 1] = sid;
	smmu_group->sids = sids;
	return 0;

out_put_group:
	iommu_group_put(group);
	return ret;
}
static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}
static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.detach_dev		= arm_smmu_detach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords)
{
	size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;

	q->base = dma_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
	if (!q->base) {
		dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
			qsz);
		return -ENOMEM;
	}

	q->prod_reg	= smmu->base + prod_off;
	q->cons_reg	= smmu->base + cons_off;
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
	q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
		     << Q_BASE_LOG2SIZE_SHIFT;

	q->prod = q->cons = 0;
	return 0;
}
static void arm_smmu_free_one_queue(struct arm_smmu_device *smmu,
				    struct arm_smmu_queue *q)
{
	size_t qsz = ((1 << q->max_n_shift) * q->ent_dwords) << 3;

	dma_free_coherent(smmu->dev, qsz, q->base, q->base_dma);
}

static void arm_smmu_free_queues(struct arm_smmu_device *smmu)
{
	arm_smmu_free_one_queue(smmu, &smmu->cmdq.q);
	arm_smmu_free_one_queue(smmu, &smmu->evtq.q);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		arm_smmu_free_one_queue(smmu, &smmu->priq.q);
}
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
	if (ret)
		goto out_free_cmdq;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	ret = arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				      ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
	if (ret)
		goto out_free_evtq;

	return 0;

out_free_evtq:
	arm_smmu_free_one_queue(smmu, &smmu->evtq.q);
out_free_cmdq:
	arm_smmu_free_one_queue(smmu, &smmu->cmdq.q);
	return ret;
}
static void arm_smmu_free_l2_strtab(struct arm_smmu_device *smmu)
{
	int i;
	size_t size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	for (i = 0; i < cfg->num_l1_ents; ++i) {
		struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[i];

		if (!desc->l2ptr)
			continue;

		dma_free_coherent(smmu->dev, size, desc->l2ptr,
				  desc->l2ptr_dma);
	}
}
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}
static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	int ret;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/* Calculate the L1 size, capped to the SIDSIZE */
	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
	size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	if (size + STRTAB_SPLIT < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size + STRTAB_SPLIT, smmu->sid_bits);

	cfg->num_l1_ents = 1 << size;
	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			l1size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg  = STRTAB_BASE_CFG_FMT_2LVL;
	/* LOG2SIZE covers the SID bits resolved by both levels */
	reg |= ((size + STRTAB_SPLIT) & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
		<< STRTAB_BASE_CFG_SPLIT_SHIFT;
	cfg->strtab_base_cfg = reg;

	ret = arm_smmu_init_l1_strtab(smmu);
	if (ret)
		dma_free_coherent(smmu->dev, l1size, strtab, cfg->strtab_dma);

	return ret;
}
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;

	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg  = STRTAB_BASE_CFG_FMT_LINEAR;
	reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}
static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg  = smmu->strtab_cfg.strtab_dma &
	       STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}
static void arm_smmu_free_strtab(struct arm_smmu_device *smmu)
{
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	u32 size = cfg->num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		arm_smmu_free_l2_strtab(smmu);
		size *= STRTAB_L1_DESC_DWORDS << 3;
	} else {
		size *= STRTAB_STE_DWORDS << 3;
	}

	dma_free_coherent(smmu->dev, size, cfg->strtab, cfg->strtab_dma);
}
static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	ret = arm_smmu_init_strtab(smmu);
	if (ret)
		goto out_free_queues;

	return 0;

out_free_queues:
	arm_smmu_free_queues(smmu);
	return ret;
}

static void arm_smmu_free_structures(struct arm_smmu_device *smmu)
{
	arm_smmu_free_strtab(smmu);
	arm_smmu_free_queues(smmu);
}
static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	/* Request wired interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq,
						arm_smmu_evtq_handler,
						arm_smmu_evtq_thread,
						0, "arm-smmu-v3-evtq", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	}

	irq = smmu->cmdq.q.irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq,
				       arm_smmu_cmdq_sync_handler, 0,
				       "arm-smmu-v3-cmdq-sync", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);

		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq,
							arm_smmu_priq_handler,
							arm_smmu_priq_thread,
							0, "arm-smmu-v3-priq",
							smmu);
			if (IS_ERR_VALUE(ret))
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
		}
	}

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu,
				      IRQ_CTRL_EVTQ_IRQEN |
				      IRQ_CTRL_GERROR_IRQEN,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}
static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}
2251 static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
2255 struct arm_smmu_cmdq_ent cmd;
2257 /* Clear CR0 and sync (disables SMMU and queue processing) */
2258 reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
2259 if (reg & CR0_SMMUEN)
2260 dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
2262 ret = arm_smmu_device_disable(smmu);
2266 /* CR1 (table and queue memory attributes) */
2267 reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
2268 (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
2269 (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
2270 (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
2271 (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
2272 (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
2273 writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
2275 /* CR2 (random crap) */
2276 reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
2277 writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
2280 writeq_relaxed(smmu->strtab_cfg.strtab_base,
2281 smmu->base + ARM_SMMU_STRTAB_BASE);
2282 writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
2283 smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
2286 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
2287 writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
2288 writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
2290 enables = CR0_CMDQEN;
2291 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2294 dev_err(smmu->dev, "failed to enable command queue\n");
2298 /* Invalidate any cached configuration */
2299 cmd.opcode = CMDQ_OP_CFGI_ALL;
2300 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2301 cmd.opcode = CMDQ_OP_CMD_SYNC;
2302 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2304 /* Invalidate any stale TLB entries */
2305 if (smmu->features & ARM_SMMU_FEAT_HYP) {
2306 cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
2307 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2310 cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
2311 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2312 cmd.opcode = CMDQ_OP_CMD_SYNC;
2313 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
	writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       smmu->base + ARM_SMMU_PRIQ_PROD);
		writel_relaxed(smmu->priq.q.cons,
			       smmu->base + ARM_SMMU_PRIQ_CONS);

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	/* Enable the SMMU interface */
	enables |= CR0_SMMUEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}
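
/*
 * Discover what the hardware supports from the ID registers and size
 * the queues and tables accordingly. This only fills in arm_smmu_device
 * fields consumed later by arm_smmu_init_structures() and
 * arm_smmu_device_reset(); the SMMU itself is not reconfigured here.
 */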
static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent;
	unsigned long pgsize_bitmap = 0;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The dma-coherent property is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	coherent = of_dma_is_coherent(smmu->dev->of_node);
	if (coherent)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
			 coherent ? "true" : "false");

	if (reg & IDR0_STALL_MODEL)
		smmu->features |= ARM_SMMU_FEAT_STALLS;

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	if ((reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) < IDR0_TTF_AARCH64) {
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
				       reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
				       reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
	smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
				       reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);

	/* SID/SSID sizes */
	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
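
	/*
	 * sid_bits sizes the stream table: elsewhere in this driver it
	 * determines, together with ARM_SMMU_FEAT_2_LVL_STRTAB, whether
	 * a linear or a two-level table is allocated.
	 */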

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
				& IDR5_STALL_MAX_MASK;
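
	/*
	 * Page sizes: each supported translation granule contributes
	 * its page size plus the AArch64 VMSA block sizes reachable
	 * from it (4K -> 2M and 1G, 16K -> 32M, 64K -> 512M).
	 */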
	if (reg & IDR5_GRAN64K)
		pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

	arm_smmu_ops.pgsize_bitmap &= pgsize_bitmap;

	/* Output address size */
	switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
		break;
	default:
		dev_err(smmu->dev, "unknown output address size!\n");
		return -ENXIO;
	}

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = smmu->oas;

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
}
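
/*
 * A minimal sketch of the device-tree node this probe routine expects;
 * the address and interrupt specifiers below are invented for the sake
 * of illustration (see the arm,smmu-v3 binding document for the
 * authoritative description):
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		reg = <0x0 0x2b400000 0x0 0x20000>;
 *		interrupts = <0 74 1>, <0 75 1>, <0 76 1>, <0 77 1>;
 *		interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
 *		dma-coherent;
 *	};
 */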
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) < SZ_128K) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */
	irq = platform_get_irq_byname(pdev, "eventq");
	if (irq > 0)
		smmu->evtq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "priq");
	if (irq > 0)
		smmu->priq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "cmdq-sync");
	if (irq > 0)
		smmu->cmdq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "gerror");
	if (irq > 0)
		smmu->gerr_irq = irq;

	/* Probe the h/w */
	ret = arm_smmu_device_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu);
	if (ret)
		goto out_free_structures;

	/* Record our private device structure */
	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);
	return 0;

out_free_structures:
	arm_smmu_free_structures(smmu);
	return ret;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *curr, *smmu = NULL;
	struct device *dev = &pdev->dev;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	arm_smmu_device_disable(smmu);
	arm_smmu_free_structures(smmu);
	return 0;
}
static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};
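
/*
 * Registered at subsys_initcall time, which runs before ordinary device
 * initcalls; this lets the SMMU come up before the master devices that
 * depend on it start to probe. The of_find_matching_node() check below
 * keeps all of this a no-op on systems without an SMMUv3 node.
 */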
static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	return bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}

static void __exit arm_smmu_exit(void)
{
	platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");