Commit | Line | Data |
---|---|---|
d25a2a16 LP |
1 | /* |
2 | * IPMMU VMSA | |
3 | * | |
4 | * Copyright (C) 2014 Renesas Electronics Corporation | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License as published by | |
8 | * the Free Software Foundation; version 2 of the License. | |
9 | */ | |
10 | ||
11 | #include <linux/delay.h> | |
12 | #include <linux/dma-mapping.h> | |
13 | #include <linux/err.h> | |
14 | #include <linux/export.h> | |
15 | #include <linux/interrupt.h> | |
16 | #include <linux/io.h> | |
17 | #include <linux/iommu.h> | |
18 | #include <linux/module.h> | |
275f5053 | 19 | #include <linux/of.h> |
d25a2a16 LP |
20 | #include <linux/platform_device.h> |
21 | #include <linux/sizes.h> | |
22 | #include <linux/slab.h> | |
23 | ||
24 | #include <asm/dma-iommu.h> | |
25 | #include <asm/pgalloc.h> | |
26 | ||
f20ed39f LP |
27 | #include "io-pgtable.h" |
28 | ||
d25a2a16 LP |
29 | struct ipmmu_vmsa_device { |
30 | struct device *dev; | |
31 | void __iomem *base; | |
32 | struct list_head list; | |
33 | ||
d25a2a16 LP |
34 | unsigned int num_utlbs; |
35 | ||
36 | struct dma_iommu_mapping *mapping; | |
37 | }; | |
38 | ||
39 | struct ipmmu_vmsa_domain { | |
40 | struct ipmmu_vmsa_device *mmu; | |
41 | struct iommu_domain *io_domain; | |
42 | ||
f20ed39f LP |
43 | struct io_pgtable_cfg cfg; |
44 | struct io_pgtable_ops *iop; | |
45 | ||
d25a2a16 LP |
46 | unsigned int context_id; |
47 | spinlock_t lock; /* Protects mappings */ | |
d25a2a16 LP |
48 | }; |
49 | ||
192d2045 LP |
50 | struct ipmmu_vmsa_archdata { |
51 | struct ipmmu_vmsa_device *mmu; | |
a166d31e LP |
52 | unsigned int *utlbs; |
53 | unsigned int num_utlbs; | |
192d2045 LP |
54 | }; |
55 | ||
d25a2a16 LP |
56 | static DEFINE_SPINLOCK(ipmmu_devices_lock); |
57 | static LIST_HEAD(ipmmu_devices); | |
58 | ||
59 | #define TLB_LOOP_TIMEOUT 100 /* 100us */ | |
60 | ||
61 | /* ----------------------------------------------------------------------------- | |
62 | * Registers Definition | |
63 | */ | |
64 | ||
275f5053 LP |
65 | #define IM_NS_ALIAS_OFFSET 0x800 |
66 | ||
d25a2a16 LP |
67 | #define IM_CTX_SIZE 0x40 |
68 | ||
69 | #define IMCTR 0x0000 | |
70 | #define IMCTR_TRE (1 << 17) | |
71 | #define IMCTR_AFE (1 << 16) | |
72 | #define IMCTR_RTSEL_MASK (3 << 4) | |
73 | #define IMCTR_RTSEL_SHIFT 4 | |
74 | #define IMCTR_TREN (1 << 3) | |
75 | #define IMCTR_INTEN (1 << 2) | |
76 | #define IMCTR_FLUSH (1 << 1) | |
77 | #define IMCTR_MMUEN (1 << 0) | |
78 | ||
79 | #define IMCAAR 0x0004 | |
80 | ||
81 | #define IMTTBCR 0x0008 | |
82 | #define IMTTBCR_EAE (1 << 31) | |
83 | #define IMTTBCR_PMB (1 << 30) | |
84 | #define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) | |
85 | #define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) | |
86 | #define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) | |
87 | #define IMTTBCR_SH1_MASK (3 << 28) | |
88 | #define IMTTBCR_ORGN1_NC (0 << 26) | |
89 | #define IMTTBCR_ORGN1_WB_WA (1 << 26) | |
90 | #define IMTTBCR_ORGN1_WT (2 << 26) | |
91 | #define IMTTBCR_ORGN1_WB (3 << 26) | |
92 | #define IMTTBCR_ORGN1_MASK (3 << 26) | |
93 | #define IMTTBCR_IRGN1_NC (0 << 24) | |
94 | #define IMTTBCR_IRGN1_WB_WA (1 << 24) | |
95 | #define IMTTBCR_IRGN1_WT (2 << 24) | |
96 | #define IMTTBCR_IRGN1_WB (3 << 24) | |
97 | #define IMTTBCR_IRGN1_MASK (3 << 24) | |
98 | #define IMTTBCR_TSZ1_MASK (7 << 16) | |
99 | #define IMTTBCR_TSZ1_SHIFT 16 | |
100 | #define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) | |
101 | #define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) | |
102 | #define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) | |
103 | #define IMTTBCR_SH0_MASK (3 << 12) | |
104 | #define IMTTBCR_ORGN0_NC (0 << 10) | |
105 | #define IMTTBCR_ORGN0_WB_WA (1 << 10) | |
106 | #define IMTTBCR_ORGN0_WT (2 << 10) | |
107 | #define IMTTBCR_ORGN0_WB (3 << 10) | |
108 | #define IMTTBCR_ORGN0_MASK (3 << 10) | |
109 | #define IMTTBCR_IRGN0_NC (0 << 8) | |
110 | #define IMTTBCR_IRGN0_WB_WA (1 << 8) | |
111 | #define IMTTBCR_IRGN0_WT (2 << 8) | |
112 | #define IMTTBCR_IRGN0_WB (3 << 8) | |
113 | #define IMTTBCR_IRGN0_MASK (3 << 8) | |
114 | #define IMTTBCR_SL0_LVL_2 (0 << 4) | |
115 | #define IMTTBCR_SL0_LVL_1 (1 << 4) | |
116 | #define IMTTBCR_TSZ0_MASK (7 << 0) | |
117 | #define IMTTBCR_TSZ0_SHIFT 0 | |
118 | ||
119 | #define IMBUSCR 0x000c | |
120 | #define IMBUSCR_DVM (1 << 2) | |
121 | #define IMBUSCR_BUSSEL_SYS (0 << 0) | |
122 | #define IMBUSCR_BUSSEL_CCI (1 << 0) | |
123 | #define IMBUSCR_BUSSEL_IMCAAR (2 << 0) | |
124 | #define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0) | |
125 | #define IMBUSCR_BUSSEL_MASK (3 << 0) | |
126 | ||
127 | #define IMTTLBR0 0x0010 | |
128 | #define IMTTUBR0 0x0014 | |
129 | #define IMTTLBR1 0x0018 | |
130 | #define IMTTUBR1 0x001c | |
131 | ||
132 | #define IMSTR 0x0020 | |
133 | #define IMSTR_ERRLVL_MASK (3 << 12) | |
134 | #define IMSTR_ERRLVL_SHIFT 12 | |
135 | #define IMSTR_ERRCODE_TLB_FORMAT (1 << 8) | |
136 | #define IMSTR_ERRCODE_ACCESS_PERM (4 << 8) | |
137 | #define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8) | |
138 | #define IMSTR_ERRCODE_MASK (7 << 8) | |
139 | #define IMSTR_MHIT (1 << 4) | |
140 | #define IMSTR_ABORT (1 << 2) | |
141 | #define IMSTR_PF (1 << 1) | |
142 | #define IMSTR_TF (1 << 0) | |
143 | ||
144 | #define IMMAIR0 0x0028 | |
145 | #define IMMAIR1 0x002c | |
146 | #define IMMAIR_ATTR_MASK 0xff | |
147 | #define IMMAIR_ATTR_DEVICE 0x04 | |
148 | #define IMMAIR_ATTR_NC 0x44 | |
149 | #define IMMAIR_ATTR_WBRWA 0xff | |
150 | #define IMMAIR_ATTR_SHIFT(n) ((n) << 3) | |
151 | #define IMMAIR_ATTR_IDX_NC 0 | |
152 | #define IMMAIR_ATTR_IDX_WBRWA 1 | |
153 | #define IMMAIR_ATTR_IDX_DEV 2 | |
154 | ||
155 | #define IMEAR 0x0030 | |
156 | ||
157 | #define IMPCTR 0x0200 | |
158 | #define IMPSTR 0x0208 | |
159 | #define IMPEAR 0x020c | |
160 | #define IMPMBA(n) (0x0280 + ((n) * 4)) | |
161 | #define IMPMBD(n) (0x02c0 + ((n) * 4)) | |
162 | ||
163 | #define IMUCTR(n) (0x0300 + ((n) * 16)) | |
164 | #define IMUCTR_FIXADDEN (1 << 31) | |
165 | #define IMUCTR_FIXADD_MASK (0xff << 16) | |
166 | #define IMUCTR_FIXADD_SHIFT 16 | |
167 | #define IMUCTR_TTSEL_MMU(n) ((n) << 4) | |
168 | #define IMUCTR_TTSEL_PMB (8 << 4) | |
169 | #define IMUCTR_TTSEL_MASK (15 << 4) | |
170 | #define IMUCTR_FLUSH (1 << 1) | |
171 | #define IMUCTR_MMUEN (1 << 0) | |
172 | ||
173 | #define IMUASID(n) (0x0308 + ((n) * 16)) | |
174 | #define IMUASID_ASID8_MASK (0xff << 8) | |
175 | #define IMUASID_ASID8_SHIFT 8 | |
176 | #define IMUASID_ASID0_MASK (0xff << 0) | |
177 | #define IMUASID_ASID0_SHIFT 0 | |
178 | ||
d25a2a16 LP |
179 | /* ----------------------------------------------------------------------------- |
180 | * Read/Write Access | |
181 | */ | |
182 | ||
183 | static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset) | |
184 | { | |
185 | return ioread32(mmu->base + offset); | |
186 | } | |
187 | ||
188 | static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, | |
189 | u32 data) | |
190 | { | |
191 | iowrite32(data, mmu->base + offset); | |
192 | } | |
193 | ||
194 | static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg) | |
195 | { | |
196 | return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg); | |
197 | } | |
198 | ||
199 | static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg, | |
200 | u32 data) | |
201 | { | |
202 | ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data); | |
203 | } | |
204 | ||
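A worked example of the context addressing used by these helpers, assuming the `IM_CTX_SIZE` and register offsets defined above (the `example_` helper is illustrative, not part of the driver):

```c
/*
 * Each context owns a 0x40-byte register window, so a context register's
 * MMIO offset is context_id * IM_CTX_SIZE + reg; e.g. IMTTBCR (0x0008) of
 * context 1 lives at 1 * 0x40 + 0x08 = 0x48 from the IPMMU base.
 */
static unsigned int example_ctx_offset(unsigned int context_id,
				       unsigned int reg)
{
	return context_id * IM_CTX_SIZE + reg;
}
```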
205 | /* ----------------------------------------------------------------------------- | |
206 | * TLB and microTLB Management | |
207 | */ | |
208 | ||
209 | /* Wait for any pending TLB invalidations to complete */ | |
210 | static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) | |
211 | { | |
212 | unsigned int count = 0; | |
213 | ||
214 | while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) { | |
215 | cpu_relax(); | |
216 | if (++count == TLB_LOOP_TIMEOUT) { | |
217 | dev_err_ratelimited(domain->mmu->dev, | |
218 | "TLB sync timed out -- MMU may be deadlocked\n"); | |
219 | return; | |
220 | } | |
221 | udelay(1); | |
222 | } | |
223 | } | |
224 | ||
225 | static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) | |
226 | { | |
227 | u32 reg; | |
228 | ||
229 | reg = ipmmu_ctx_read(domain, IMCTR); | |
230 | reg |= IMCTR_FLUSH; | |
231 | ipmmu_ctx_write(domain, IMCTR, reg); | |
232 | ||
233 | ipmmu_tlb_sync(domain); | |
234 | } | |
235 | ||
236 | /* | |
237 | * Enable MMU translation for the microTLB. | |
238 | */ | |
239 | static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, | |
192d2045 | 240 | unsigned int utlb) |
d25a2a16 LP |
241 | { |
242 | struct ipmmu_vmsa_device *mmu = domain->mmu; | |
243 | ||
192d2045 LP |
244 | /* |
245 | * TODO: Reference-count the microTLB as several bus masters can be | |
246 | * connected to the same microTLB. | |
247 | */ | |
248 | ||
d25a2a16 | 249 | /* TODO: What should we set the ASID to ? */ |
192d2045 | 250 | ipmmu_write(mmu, IMUASID(utlb), 0); |
d25a2a16 | 251 | /* TODO: Do we need to flush the microTLB ? */ |
192d2045 | 252 | ipmmu_write(mmu, IMUCTR(utlb), |
d25a2a16 LP |
253 | IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH | |
254 | IMUCTR_MMUEN); | |
255 | } | |
256 | ||
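For reference, the IMUCTR value written by ipmmu_utlb_enable() expands as follows from the bit definitions above (worked arithmetic for context 0, not additional driver code):

```c
/*
 * IMUCTR_TTSEL_MMU(0) = 0 << 4 = 0x0  (microTLB uses translation context 0)
 * IMUCTR_FLUSH        = 1 << 1 = 0x2  (flush stale microTLB entries)
 * IMUCTR_MMUEN        = 1 << 0 = 0x1  (enable translation)
 *
 * value written to IMUCTR(utlb) = 0x3 for context 0,
 * or (n << 4) | 0x3 for context n.
 */
```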
257 | /* | |
258 | * Disable MMU translation for the microTLB. | |
259 | */ | |
260 | static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, | |
192d2045 | 261 | unsigned int utlb) |
d25a2a16 LP |
262 | { |
263 | struct ipmmu_vmsa_device *mmu = domain->mmu; | |
264 | ||
192d2045 | 265 | ipmmu_write(mmu, IMUCTR(utlb), 0); |
d25a2a16 LP |
266 | } |
267 | ||
f20ed39f | 268 | static void ipmmu_tlb_flush_all(void *cookie) |
d25a2a16 | 269 | { |
f20ed39f LP |
270 | struct ipmmu_vmsa_domain *domain = cookie; |
271 | ||
272 | ipmmu_tlb_invalidate(domain); | |
273 | } | |
274 | ||
275 | static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf, | |
276 | void *cookie) | |
277 | { | |
278 | /* The hardware doesn't support selective TLB flush. */ | |
279 | } | |
280 | ||
281 | static void ipmmu_flush_pgtable(void *ptr, size_t size, void *cookie) | |
282 | { | |
283 | unsigned long offset = (unsigned long)ptr & ~PAGE_MASK; | |
284 | struct ipmmu_vmsa_domain *domain = cookie; | |
d25a2a16 LP |
285 | |
286 | /* | |
287 | * TODO: Add support for coherent walk through CCI with DVM and remove | |
288 | * cache handling. | |
289 | */ | |
f20ed39f LP |
290 | dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size, |
291 | DMA_TO_DEVICE); | |
d25a2a16 LP |
292 | } |
293 | ||
f20ed39f LP |
294 | static struct iommu_gather_ops ipmmu_gather_ops = { |
295 | .tlb_flush_all = ipmmu_tlb_flush_all, | |
296 | .tlb_add_flush = ipmmu_tlb_add_flush, | |
297 | .tlb_sync = ipmmu_tlb_flush_all, | |
298 | .flush_pgtable = ipmmu_flush_pgtable, | |
299 | }; | |
300 | ||
d25a2a16 LP |
301 | /* ----------------------------------------------------------------------------- |
302 | * Domain/Context Management | |
303 | */ | |
304 | ||
305 | static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) | |
306 | { | |
307 | phys_addr_t ttbr; | |
f20ed39f LP |
308 | |
309 | /* | |
310 | * Allocate the page table operations. | |
311 | * | |
312 | * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory | |
313 | * access, Long-descriptor format" that the NStable bit being set in a | |
314 | * table descriptor will result in the NStable and NS bits of all child | |
315 | * entries being ignored and considered as being set. The IPMMU seems | |
316 | * not to comply with this, as it generates a secure access page fault | |
317 | * if any of the NStable and NS bits isn't set when running in | |
318 | * non-secure mode. | |
319 | */ | |
320 | domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; | |
321 | domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; | |
322 | domain->cfg.ias = 32; | |
323 | domain->cfg.oas = 40; | |
324 | domain->cfg.tlb = &ipmmu_gather_ops; | |
325 | ||
326 | domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, | |
327 | domain); | |
328 | if (!domain->iop) | |
329 | return -EINVAL; | |
d25a2a16 LP |
330 | |
331 | /* | |
332 | * TODO: When adding support for multiple contexts, find an unused | |
333 | * context. | |
334 | */ | |
335 | domain->context_id = 0; | |
336 | ||
337 | /* TTBR0 */ | |
f20ed39f | 338 | ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0]; |
d25a2a16 LP |
339 | ipmmu_ctx_write(domain, IMTTLBR0, ttbr); |
340 | ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32); | |
341 | ||
342 | /* | |
343 | * TTBCR | |
344 | * We use long descriptors with inner-shareable WBWA tables and allocate | |
345 | * the whole 32-bit VA space to TTBR0. | |
346 | */ | |
347 | ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE | | |
348 | IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | | |
349 | IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1); | |
350 | ||
f20ed39f LP |
351 | /* MAIR0 */ |
352 | ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]); | |
d25a2a16 LP |
353 | |
354 | /* IMBUSCR */ | |
355 | ipmmu_ctx_write(domain, IMBUSCR, | |
356 | ipmmu_ctx_read(domain, IMBUSCR) & | |
357 | ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK)); | |
358 | ||
359 | /* | |
360 | * IMSTR | |
361 | * Clear all interrupt flags. | |
362 | */ | |
363 | ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR)); | |
364 | ||
365 | /* | |
366 | * IMCTR | |
367 | * Enable the MMU and interrupt generation. The long-descriptor | |
368 | * translation table format doesn't use TEX remapping. Don't enable AF | |
369 | * software management as we have no use for it. Flush the TLB as | |
370 | * required when modifying the context registers. | |
371 | */ | |
372 | ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN); | |
373 | ||
374 | return 0; | |
375 | } | |
376 | ||
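The IMTTBCR write above composes to a single constant; expanding the definitions from earlier in the file gives (worked arithmetic, for reference only):

```c
/*
 * IMTTBCR_EAE                 = 1 << 31 = 0x80000000  (long descriptors)
 * IMTTBCR_SH0_INNER_SHAREABLE = 3 << 12 = 0x00003000
 * IMTTBCR_ORGN0_WB_WA         = 1 << 10 = 0x00000400
 * IMTTBCR_IRGN0_WB_WA         = 1 <<  8 = 0x00000100
 * IMTTBCR_SL0_LVL_1           = 1 <<  4 = 0x00000010
 *
 * value written to IMTTBCR    = 0x80003510
 */
```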
377 | static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) | |
378 | { | |
379 | /* | |
380 | * Disable the context. Flush the TLB as required when modifying the | |
381 | * context registers. | |
382 | * | |
383 | * TODO: Is TLB flush really needed ? | |
384 | */ | |
385 | ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH); | |
386 | ipmmu_tlb_sync(domain); | |
387 | } | |
388 | ||
389 | /* ----------------------------------------------------------------------------- | |
390 | * Fault Handling | |
391 | */ | |
392 | ||
393 | static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) | |
394 | { | |
395 | const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF; | |
396 | struct ipmmu_vmsa_device *mmu = domain->mmu; | |
397 | u32 status; | |
398 | u32 iova; | |
399 | ||
400 | status = ipmmu_ctx_read(domain, IMSTR); | |
401 | if (!(status & err_mask)) | |
402 | return IRQ_NONE; | |
403 | ||
404 | iova = ipmmu_ctx_read(domain, IMEAR); | |
405 | ||
406 | /* | |
407 | * Clear the error status flags. Unlike traditional interrupt flag | |
408 | * registers that must be cleared by writing 1, this status register | |
409 | * seems to require 0. The error address register must be read before, | |
410 | * otherwise its value will be 0. | |
411 | */ | |
412 | ipmmu_ctx_write(domain, IMSTR, 0); | |
413 | ||
414 | /* Log fatal errors. */ | |
415 | if (status & IMSTR_MHIT) | |
416 | dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n", | |
417 | iova); | |
418 | if (status & IMSTR_ABORT) | |
419 | dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n", | |
420 | iova); | |
421 | ||
422 | if (!(status & (IMSTR_PF | IMSTR_TF))) | |
423 | return IRQ_NONE; | |
424 | ||
425 | /* | |
426 | * Try to handle page faults and translation faults. | |
427 | * | |
428 | * TODO: We need to look up the faulty device based on the I/O VA. Use | |
429 | * the IOMMU device for now. | |
430 | */ | |
431 | if (!report_iommu_fault(domain->io_domain, mmu->dev, iova, 0)) | |
432 | return IRQ_HANDLED; | |
433 | ||
434 | dev_err_ratelimited(mmu->dev, | |
435 | "Unhandled fault: status 0x%08x iova 0x%08x\n", | |
436 | status, iova); | |
437 | ||
438 | return IRQ_HANDLED; | |
439 | } | |
440 | ||
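Faults that report_iommu_fault() passes upstream reach whatever handler a consumer registered on the domain through the generic iommu_set_fault_handler() API. A minimal sketch of such a handler (hypothetical `example_` names; a non-zero return makes ipmmu_domain_irq() log the fault as unhandled):

```c
static int example_fault_handler(struct iommu_domain *io_domain,
				 struct device *dev, unsigned long iova,
				 int flags, void *token)
{
	/* This driver passes flags == 0 (access direction not decoded). */
	dev_warn(dev, "IPMMU fault at iova %#lx\n", iova);
	return -ENOSYS;	/* not handled */
}

/* Registration, e.g. after attaching the device to the domain:
 * iommu_set_fault_handler(io_domain, example_fault_handler, NULL);
 */
```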
441 | static irqreturn_t ipmmu_irq(int irq, void *dev) | |
442 | { | |
443 | struct ipmmu_vmsa_device *mmu = dev; | |
444 | struct iommu_domain *io_domain; | |
445 | struct ipmmu_vmsa_domain *domain; | |
446 | ||
447 | if (!mmu->mapping) | |
448 | return IRQ_NONE; | |
449 | ||
450 | io_domain = mmu->mapping->domain; | |
451 | domain = io_domain->priv; | |
452 | ||
453 | return ipmmu_domain_irq(domain); | |
454 | } | |
455 | ||
d25a2a16 LP |
456 | /* ----------------------------------------------------------------------------- |
457 | * IOMMU Operations | |
458 | */ | |
459 | ||
d25a2a16 LP |
460 | static int ipmmu_domain_init(struct iommu_domain *io_domain) |
461 | { | |
462 | struct ipmmu_vmsa_domain *domain; | |
463 | ||
464 | domain = kzalloc(sizeof(*domain), GFP_KERNEL); | |
465 | if (!domain) | |
466 | return -ENOMEM; | |
467 | ||
468 | spin_lock_init(&domain->lock); | |
469 | ||
d25a2a16 LP |
470 | io_domain->priv = domain; |
471 | domain->io_domain = io_domain; | |
472 | ||
473 | return 0; | |
474 | } | |
475 | ||
476 | static void ipmmu_domain_destroy(struct iommu_domain *io_domain) | |
477 | { | |
478 | struct ipmmu_vmsa_domain *domain = io_domain->priv; | |
479 | ||
480 | /* | |
481 | * Free the domain resources. We assume that all devices have already | |
482 | * been detached. | |
483 | */ | |
484 | ipmmu_domain_destroy_context(domain); | |
f20ed39f | 485 | free_io_pgtable_ops(domain->iop); |
d25a2a16 LP |
486 | kfree(domain); |
487 | } | |
488 | ||
489 | static int ipmmu_attach_device(struct iommu_domain *io_domain, | |
490 | struct device *dev) | |
491 | { | |
192d2045 LP |
492 | struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; |
493 | struct ipmmu_vmsa_device *mmu = archdata->mmu; | |
d25a2a16 | 494 | struct ipmmu_vmsa_domain *domain = io_domain->priv; |
d25a2a16 | 495 | unsigned long flags; |
a166d31e | 496 | unsigned int i; |
d25a2a16 LP |
497 | int ret = 0; |
498 | ||
499 | if (!mmu) { | |
500 | dev_err(dev, "Cannot attach to IPMMU\n"); | |
501 | return -ENXIO; | |
502 | } | |
503 | ||
504 | spin_lock_irqsave(&domain->lock, flags); | |
505 | ||
506 | if (!domain->mmu) { | |
507 | /* The domain hasn't been used yet, initialize it. */ | |
508 | domain->mmu = mmu; | |
509 | ret = ipmmu_domain_init_context(domain); | |
510 | } else if (domain->mmu != mmu) { | |
511 | /* | |
512 | * Something is wrong, we can't attach two devices using | |
513 | * different IOMMUs to the same domain. | |
514 | */ | |
515 | dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n", | |
516 | dev_name(mmu->dev), dev_name(domain->mmu->dev)); | |
517 | ret = -EINVAL; | |
518 | } | |
519 | ||
520 | spin_unlock_irqrestore(&domain->lock, flags); | |
521 | ||
522 | if (ret < 0) | |
523 | return ret; | |
524 | ||
a166d31e LP |
525 | for (i = 0; i < archdata->num_utlbs; ++i) |
526 | ipmmu_utlb_enable(domain, archdata->utlbs[i]); | |
d25a2a16 LP |
527 | |
528 | return 0; | |
529 | } | |
530 | ||
531 | static void ipmmu_detach_device(struct iommu_domain *io_domain, | |
532 | struct device *dev) | |
533 | { | |
192d2045 | 534 | struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; |
d25a2a16 | 535 | struct ipmmu_vmsa_domain *domain = io_domain->priv; |
a166d31e | 536 | unsigned int i; |
d25a2a16 | 537 | |
a166d31e LP |
538 | for (i = 0; i < archdata->num_utlbs; ++i) |
539 | ipmmu_utlb_disable(domain, archdata->utlbs[i]); | |
d25a2a16 LP |
540 | |
541 | /* | |
542 | * TODO: Optimize by disabling the context when no device is attached. | |
543 | */ | |
544 | } | |
545 | ||
546 | static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, | |
547 | phys_addr_t paddr, size_t size, int prot) | |
548 | { | |
549 | struct ipmmu_vmsa_domain *domain = io_domain->priv; | |
550 | ||
551 | if (!domain) | |
552 | return -ENODEV; | |
553 | ||
f20ed39f | 554 | return domain->iop->map(domain->iop, iova, paddr, size, prot); |
d25a2a16 LP |
555 | } |
556 | ||
557 | static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, | |
558 | size_t size) | |
559 | { | |
560 | struct ipmmu_vmsa_domain *domain = io_domain->priv; | |
d25a2a16 | 561 | |
f20ed39f | 562 | return domain->iop->unmap(domain->iop, iova, size); |
d25a2a16 LP |
563 | } |
564 | ||
565 | static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, | |
566 | dma_addr_t iova) | |
567 | { | |
568 | struct ipmmu_vmsa_domain *domain = io_domain->priv; | |
d25a2a16 LP |
569 | |
570 | /* TODO: Is locking needed ? */ | |
571 | ||
f20ed39f | 572 | return domain->iop->iova_to_phys(domain->iop, iova); |
d25a2a16 LP |
573 | } |
574 | ||
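These three callbacks back the generic iommu_map()/iommu_unmap()/iommu_iova_to_phys() entry points. A minimal consumer-side sketch, assuming <linux/iommu.h> and a domain already attached to this IPMMU (the `example_` function is illustrative):

```c
/* Map one 4K page, verify the translation, then tear it down. */
static int example_map_one(struct iommu_domain *io_domain, unsigned long iova,
			   phys_addr_t paddr)
{
	int ret;

	ret = iommu_map(io_domain, iova, paddr, SZ_4K,
			IOMMU_READ | IOMMU_WRITE);
	if (ret < 0)
		return ret;

	WARN_ON(iommu_iova_to_phys(io_domain, iova) != paddr);

	iommu_unmap(io_domain, iova, SZ_4K);
	return 0;
}
```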
a166d31e LP |
575 | static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev, |
576 | unsigned int **_utlbs) | |
192d2045 | 577 | { |
a166d31e LP |
578 | unsigned int *utlbs; |
579 | unsigned int i; | |
580 | int count; | |
275f5053 | 581 | |
a166d31e LP |
582 | count = of_count_phandle_with_args(dev->of_node, "iommus", |
583 | "#iommu-cells"); | |
584 | if (count < 0) | |
585 | return -EINVAL; | |
586 | ||
587 | utlbs = kcalloc(count, sizeof(*utlbs), GFP_KERNEL); | |
588 | if (!utlbs) | |
589 | return -ENOMEM; | |
590 | ||
591 | for (i = 0; i < count; ++i) { | |
592 | struct of_phandle_args args; | |
593 | int ret; | |
594 | ||
595 | ret = of_parse_phandle_with_args(dev->of_node, "iommus", | |
596 | "#iommu-cells", i, &args); | |
597 | if (ret < 0) | |
598 | goto error; | |
599 | ||
600 | of_node_put(args.np); | |
601 | ||
602 | if (args.np != mmu->dev->of_node || args.args_count != 1) | |
603 | goto error; | |
604 | ||
605 | utlbs[i] = args.args[0]; | |
606 | } | |
275f5053 | 607 | |
a166d31e | 608 | *_utlbs = utlbs; |
275f5053 | 609 | |
a166d31e | 610 | return count; |
275f5053 | 611 | |
a166d31e LP |
612 | error: |
613 | kfree(utlbs); | |
614 | return -EINVAL; | |
192d2045 LP |
615 | } |
616 | ||
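ipmmu_find_utlbs() walks the master's "iommus" property and accepts only single-cell specifiers that point back at this IPMMU instance, the cell being the microTLB index. A hypothetical device-tree fragment matching that shape (node names, addresses and the index are illustrative, not taken from a real board file):

```c
/*
 *	ipmmu: mmu@fe951000 {
 *		compatible = "renesas,ipmmu-vmsa";
 *		reg = <0xfe951000 0x1000>;
 *		#iommu-cells = <1>;
 *	};
 *
 *	&some_master {
 *		iommus = <&ipmmu 13>;	// args.args[0] == 13: microTLB index
 *	};
 */
```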
d25a2a16 LP |
617 | static int ipmmu_add_device(struct device *dev) |
618 | { | |
192d2045 | 619 | struct ipmmu_vmsa_archdata *archdata; |
d25a2a16 | 620 | struct ipmmu_vmsa_device *mmu; |
a166d31e LP |
621 | struct iommu_group *group = NULL; |
622 | unsigned int *utlbs = NULL; | |
623 | unsigned int i; | |
624 | int num_utlbs = 0; | |
d25a2a16 LP |
625 | int ret; |
626 | ||
627 | if (dev->archdata.iommu) { | |
628 | dev_warn(dev, "IOMMU driver already assigned to device %s\n", | |
629 | dev_name(dev)); | |
630 | return -EINVAL; | |
631 | } | |
632 | ||
633 | /* Find the master corresponding to the device. */ | |
634 | spin_lock(&ipmmu_devices_lock); | |
635 | ||
636 | list_for_each_entry(mmu, &ipmmu_devices, list) { | |
a166d31e LP |
637 | num_utlbs = ipmmu_find_utlbs(mmu, dev, &utlbs); |
638 | if (num_utlbs) { | |
d25a2a16 | 639 | /* |
192d2045 | 640 | * TODO Take a reference to the MMU to protect |
d25a2a16 LP |
641 | * against device removal. |
642 | */ | |
643 | break; | |
644 | } | |
645 | } | |
646 | ||
647 | spin_unlock(&ipmmu_devices_lock); | |
648 | ||
a166d31e | 649 | if (num_utlbs <= 0) |
d25a2a16 LP |
650 | return -ENODEV; |
651 | ||
a166d31e LP |
652 | for (i = 0; i < num_utlbs; ++i) { |
653 | if (utlbs[i] >= mmu->num_utlbs) { | |
654 | ret = -EINVAL; | |
655 | goto error; | |
656 | } | |
657 | } | |
d25a2a16 LP |
658 | |
659 | /* Create a device group and add the device to it. */ | |
660 | group = iommu_group_alloc(); | |
661 | if (IS_ERR(group)) { | |
662 | dev_err(dev, "Failed to allocate IOMMU group\n"); | |
a166d31e LP |
663 | ret = PTR_ERR(group); |
664 | goto error; | |
d25a2a16 LP |
665 | } |
666 | ||
667 | ret = iommu_group_add_device(group, dev); | |
668 | iommu_group_put(group); | |
669 | ||
670 | if (ret < 0) { | |
671 | dev_err(dev, "Failed to add device to IPMMU group\n"); | |
a166d31e LP |
672 | group = NULL; |
673 | goto error; | |
d25a2a16 LP |
674 | } |
675 | ||
192d2045 LP |
676 | archdata = kzalloc(sizeof(*archdata), GFP_KERNEL); |
677 | if (!archdata) { | |
678 | ret = -ENOMEM; | |
679 | goto error; | |
680 | } | |
681 | ||
682 | archdata->mmu = mmu; | |
a166d31e LP |
683 | archdata->utlbs = utlbs; |
684 | archdata->num_utlbs = num_utlbs; | |
192d2045 | 685 | dev->archdata.iommu = archdata; |
d25a2a16 LP |
686 | |
687 | /* | |
688 | * Create the ARM mapping, used by the ARM DMA mapping core to allocate | |
689 | * VAs. This will allocate a corresponding IOMMU domain. | |
690 | * | |
691 | * TODO: | |
692 | * - Create one mapping per context (TLB). | |
693 | * - Make the mapping size configurable ? We currently use a 2GB mapping | |
694 | * at a 1GB offset to ensure that NULL VAs will fault. | |
695 | */ | |
696 | if (!mmu->mapping) { | |
697 | struct dma_iommu_mapping *mapping; | |
698 | ||
699 | mapping = arm_iommu_create_mapping(&platform_bus_type, | |
720b0cef | 700 | SZ_1G, SZ_2G); |
d25a2a16 LP |
701 | if (IS_ERR(mapping)) { |
702 | dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n"); | |
b8f80bff LP |
703 | ret = PTR_ERR(mapping); |
704 | goto error; | |
d25a2a16 LP |
705 | } |
706 | ||
707 | mmu->mapping = mapping; | |
708 | } | |
709 | ||
710 | /* Attach the ARM VA mapping to the device. */ | |
711 | ret = arm_iommu_attach_device(dev, mmu->mapping); | |
712 | if (ret < 0) { | |
713 | dev_err(dev, "Failed to attach device to VA mapping\n"); | |
714 | goto error; | |
715 | } | |
716 | ||
717 | return 0; | |
718 | ||
719 | error: | |
b8f80bff | 720 | arm_iommu_release_mapping(mmu->mapping); |
a166d31e | 721 | |
192d2045 | 722 | kfree(dev->archdata.iommu); |
a166d31e LP |
723 | kfree(utlbs); |
724 | ||
d25a2a16 | 725 | dev->archdata.iommu = NULL; |
a166d31e LP |
726 | |
727 | if (!IS_ERR_OR_NULL(group)) | |
728 | iommu_group_remove_device(dev); | |
729 | ||
d25a2a16 LP |
730 | return ret; |
731 | } | |
732 | ||
733 | static void ipmmu_remove_device(struct device *dev) | |
734 | { | |
a166d31e LP |
735 | struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; |
736 | ||
d25a2a16 LP |
737 | arm_iommu_detach_device(dev); |
738 | iommu_group_remove_device(dev); | |
a166d31e LP |
739 | |
740 | kfree(archdata->utlbs); | |
741 | kfree(archdata); | |
742 | ||
d25a2a16 LP |
743 | dev->archdata.iommu = NULL; |
744 | } | |
745 | ||
b22f6434 | 746 | static const struct iommu_ops ipmmu_ops = { |
d25a2a16 LP |
747 | .domain_init = ipmmu_domain_init, |
748 | .domain_destroy = ipmmu_domain_destroy, | |
749 | .attach_dev = ipmmu_attach_device, | |
750 | .detach_dev = ipmmu_detach_device, | |
751 | .map = ipmmu_map, | |
752 | .unmap = ipmmu_unmap, | |
315786eb | 753 | .map_sg = default_iommu_map_sg, |
d25a2a16 LP |
754 | .iova_to_phys = ipmmu_iova_to_phys, |
755 | .add_device = ipmmu_add_device, | |
756 | .remove_device = ipmmu_remove_device, | |
f20ed39f | 757 | .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, |
d25a2a16 LP |
758 | }; |
759 | ||
760 | /* ----------------------------------------------------------------------------- | |
761 | * Probe/remove and init | |
762 | */ | |
763 | ||
764 | static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu) | |
765 | { | |
766 | unsigned int i; | |
767 | ||
768 | /* Disable all contexts. */ | |
769 | for (i = 0; i < 4; ++i) | |
770 | ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0); | |
771 | } | |
772 | ||
773 | static int ipmmu_probe(struct platform_device *pdev) | |
774 | { | |
775 | struct ipmmu_vmsa_device *mmu; | |
776 | struct resource *res; | |
777 | int irq; | |
778 | int ret; | |
779 | ||
275f5053 | 780 | if (!IS_ENABLED(CONFIG_OF) && !pdev->dev.platform_data) { |
d25a2a16 LP |
781 | dev_err(&pdev->dev, "missing platform data\n"); |
782 | return -EINVAL; | |
783 | } | |
784 | ||
785 | mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); | |
786 | if (!mmu) { | |
787 | dev_err(&pdev->dev, "cannot allocate device data\n"); | |
788 | return -ENOMEM; | |
789 | } | |
790 | ||
791 | mmu->dev = &pdev->dev; | |
d25a2a16 LP |
792 | mmu->num_utlbs = 32; |
793 | ||
794 | /* Map I/O memory and request IRQ. */ | |
795 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
796 | mmu->base = devm_ioremap_resource(&pdev->dev, res); | |
797 | if (IS_ERR(mmu->base)) | |
798 | return PTR_ERR(mmu->base); | |
799 | ||
275f5053 LP |
800 | /* |
801 | * The IPMMU has two register banks, for secure and non-secure modes. | |
802 | * The bank mapped at the beginning of the IPMMU address space | |
803 | * corresponds to the running mode of the CPU. When running in secure | |
804 | * mode the non-secure register bank is also available at an offset. | |
805 | * | |
806 | * Secure mode operation isn't clearly documented and is thus currently | |
807 | * not implemented in the driver. Furthermore, preliminary tests of | |
808 | * non-secure operation with the main register bank were not successful. | |
809 | * Offset the registers base unconditionally to point to the non-secure | |
810 | * alias space for now. | |
811 | */ | |
812 | mmu->base += IM_NS_ALIAS_OFFSET; | |
813 | ||
d25a2a16 LP |
814 | irq = platform_get_irq(pdev, 0); |
815 | if (irq < 0) { | |
816 | dev_err(&pdev->dev, "no IRQ found\n"); | |
817 | return irq; | |
818 | } | |
819 | ||
820 | ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0, | |
821 | dev_name(&pdev->dev), mmu); | |
822 | if (ret < 0) { | |
823 | dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); | |
e222d6a4 | 824 | return ret; |
d25a2a16 LP |
825 | } |
826 | ||
827 | ipmmu_device_reset(mmu); | |
828 | ||
829 | /* | |
830 | * We can't create the ARM mapping here as it requires the bus to have | |
831 | * an IOMMU, which only happens when bus_set_iommu() is called in | |
832 | * ipmmu_init() after the probe function returns. | |
833 | */ | |
834 | ||
835 | spin_lock(&ipmmu_devices_lock); | |
836 | list_add(&mmu->list, &ipmmu_devices); | |
837 | spin_unlock(&ipmmu_devices_lock); | |
838 | ||
839 | platform_set_drvdata(pdev, mmu); | |
840 | ||
841 | return 0; | |
842 | } | |
843 | ||
844 | static int ipmmu_remove(struct platform_device *pdev) | |
845 | { | |
846 | struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev); | |
847 | ||
848 | spin_lock(&ipmmu_devices_lock); | |
849 | list_del(&mmu->list); | |
850 | spin_unlock(&ipmmu_devices_lock); | |
851 | ||
852 | arm_iommu_release_mapping(mmu->mapping); | |
853 | ||
854 | ipmmu_device_reset(mmu); | |
855 | ||
856 | return 0; | |
857 | } | |
858 | ||
275f5053 LP |
859 | static const struct of_device_id ipmmu_of_ids[] = { |
860 | { .compatible = "renesas,ipmmu-vmsa", }, |
861 | { /* Sentinel */ } |
862 | }; |
d25a2a16 LP |
863 | static struct platform_driver ipmmu_driver = { |
864 | .driver = { | |
d25a2a16 | 865 | .name = "ipmmu-vmsa", |
275f5053 | 866 | .of_match_table = of_match_ptr(ipmmu_of_ids), |
d25a2a16 LP |
867 | }, |
868 | .probe = ipmmu_probe, | |
869 | .remove = ipmmu_remove, | |
870 | }; | |
871 | ||
872 | static int __init ipmmu_init(void) | |
873 | { | |
874 | int ret; | |
875 | ||
876 | ret = platform_driver_register(&ipmmu_driver); | |
877 | if (ret < 0) | |
878 | return ret; | |
879 | ||
880 | if (!iommu_present(&platform_bus_type)) | |
881 | bus_set_iommu(&platform_bus_type, &ipmmu_ops); | |
882 | ||
883 | return 0; | |
884 | } | |
885 | ||
886 | static void __exit ipmmu_exit(void) | |
887 | { | |
888 | platform_driver_unregister(&ipmmu_driver); |
889 | } | |
890 | ||
891 | subsys_initcall(ipmmu_init); | |
892 | module_exit(ipmmu_exit); | |
893 | ||
894 | MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU"); | |
895 | MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); | |
896 | MODULE_LICENSE("GPL v2"); |