Commit | Line | Data |
---|---|---|
0ae349a0 RC |
1 | /* |
2 | * IOMMU API for QCOM secure IOMMUs. Somewhat based on arm-smmu.c | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License version 2 as | |
6 | * published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, | |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
11 | * GNU General Public License for more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License | |
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
15 | * | |
16 | * Copyright (C) 2013 ARM Limited | |
17 | * Copyright (C) 2017 Red Hat | |
18 | */ | |
19 | ||
20 | #include <linux/atomic.h> | |
21 | #include <linux/clk.h> | |
22 | #include <linux/delay.h> | |
23 | #include <linux/dma-iommu.h> | |
24 | #include <linux/dma-mapping.h> | |
25 | #include <linux/err.h> | |
26 | #include <linux/interrupt.h> | |
27 | #include <linux/io.h> | |
28 | #include <linux/io-64-nonatomic-hi-lo.h> | |
b77cf11f | 29 | #include <linux/io-pgtable.h> |
0ae349a0 RC |
30 | #include <linux/iommu.h> |
31 | #include <linux/iopoll.h> | |
32 | #include <linux/kconfig.h> | |
f295cf26 | 33 | #include <linux/init.h> |
0ae349a0 RC |
34 | #include <linux/mutex.h> |
35 | #include <linux/of.h> | |
36 | #include <linux/of_address.h> | |
37 | #include <linux/of_device.h> | |
38 | #include <linux/of_iommu.h> | |
39 | #include <linux/platform_device.h> | |
40 | #include <linux/pm.h> | |
41 | #include <linux/pm_runtime.h> | |
42 | #include <linux/qcom_scm.h> | |
43 | #include <linux/slab.h> | |
44 | #include <linux/spinlock.h> | |
45 | ||
0ae349a0 RC |
46 | #include "arm-smmu-regs.h" |
47 | ||
48 | #define SMMU_INTR_SEL_NS 0x2000 | |
49 | ||
struct qcom_iommu_ctx;

/*
 * Per-SMMU-instance state, one per "qcom,msm-iommu-v1" node.  The trailing
 * ctxs[] array is sized at probe time from the largest context-bank asid
 * found among the child nodes.
 */
struct qcom_iommu_dev {
	/* IOMMU core code handle */
	struct iommu_device	 iommu;
	struct device		*dev;
	struct clk		*iface_clk;	/* register interface clock */
	struct clk		*bus_clk;	/* bus clock */
	void __iomem		*local_base;	/* optional "local" register space */
	u32			 sec_id;	/* SCM secure-world id for this SMMU */
	u8			 num_ctxs;	/* number of entries in ctxs[] */
	struct qcom_iommu_ctx	*ctxs[0];	/* indexed by asid-1 */
};

/* Per-context-bank state, one per child ctx device node. */
struct qcom_iommu_ctx {
	struct device		*dev;
	void __iomem		*base;		/* this bank's register space */
	bool			 secure_init;	/* SCM restore_sec_cfg done? */
	u8			 asid;		/* asid and ctx bank # are 1:1 */
	struct iommu_domain	*domain;	/* domain currently attached, or NULL */
};

/* Driver-private iommu_domain wrapper. */
struct qcom_iommu_domain {
	struct io_pgtable_ops	*pgtbl_ops;	/* NULL until first attach finalizes */
	spinlock_t		 pgtbl_lock;	/* serializes map/unmap/iova_to_phys */
	struct mutex		 init_mutex;	/* Protects iommu pointer */
	struct iommu_domain	 domain;
	struct qcom_iommu_dev	*iommu;		/* set once attached; NULL when detached */
};
79 | ||
/* Container-of helper: generic iommu_domain -> driver-private domain. */
static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct qcom_iommu_domain, domain);
}

/* Forward declaration; to_iommu() uses its address as an identity check. */
static const struct iommu_ops qcom_iommu_ops;
86 | ||
87 | static struct qcom_iommu_dev * to_iommu(struct iommu_fwspec *fwspec) | |
88 | { | |
89 | if (!fwspec || fwspec->ops != &qcom_iommu_ops) | |
90 | return NULL; | |
91 | return fwspec->iommu_priv; | |
92 | } | |
93 | ||
94 | static struct qcom_iommu_ctx * to_ctx(struct iommu_fwspec *fwspec, unsigned asid) | |
95 | { | |
96 | struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec); | |
97 | if (!qcom_iommu) | |
98 | return NULL; | |
99 | return qcom_iommu->ctxs[asid - 1]; | |
100 | } | |
101 | ||
/*
 * Thin MMIO accessors for a context bank's register window.  The _relaxed
 * variants are used; callers that need ordering (e.g. TLB sync) provide it
 * themselves.
 */
static inline void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
	writel_relaxed(val, ctx->base + reg);
}

static inline void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
	writeq_relaxed(val, ctx->base + reg);
}

static inline u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readl_relaxed(ctx->base + reg);
}

static inline u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readq_relaxed(ctx->base + reg);
}
125 | ||
/*
 * Wait for previously-issued TLB invalidations to complete on every context
 * bank used by this master.  The io-pgtable cookie is the master's fwspec.
 */
static void qcom_iommu_tlb_sync(void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		unsigned int val, ret;

		/* kick off the sync... */
		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);

		/* ...then poll TLBSTATUS until the active bit clears (5s cap) */
		ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
					 (val & 0x1) == 0, 0, 5000000);
		if (ret)
			dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
	}
}
143 | ||
/*
 * Invalidate all TLB entries for every asid used by this master, then wait
 * for the invalidations to complete.
 */
static void qcom_iommu_tlb_inv_context(void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
	}

	qcom_iommu_tlb_sync(cookie);
}
156 | ||
157 | static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size, | |
158 | size_t granule, bool leaf, void *cookie) | |
159 | { | |
160 | struct iommu_fwspec *fwspec = cookie; | |
161 | unsigned i, reg; | |
162 | ||
163 | reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; | |
164 | ||
165 | for (i = 0; i < fwspec->num_ids; i++) { | |
166 | struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); | |
167 | size_t s = size; | |
168 | ||
169 | iova &= ~12UL; | |
170 | iova |= ctx->asid; | |
171 | do { | |
172 | iommu_writel(ctx, reg, iova); | |
173 | iova += granule; | |
174 | } while (s -= granule); | |
175 | } | |
176 | } | |
177 | ||
/*
 * TLB maintenance callbacks handed to the io-pgtable code; the cookie they
 * receive back is the master's iommu_fwspec.
 */
static const struct iommu_gather_ops qcom_gather_ops = {
	.tlb_flush_all	= qcom_iommu_tlb_inv_context,
	.tlb_add_flush	= qcom_iommu_tlb_inv_range_nosync,
	.tlb_sync	= qcom_iommu_tlb_sync,
};
183 | ||
/*
 * Context-bank fault handler (IRQ is shared between banks, so we must
 * check whether this bank actually faulted before claiming the interrupt).
 */
static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
	struct qcom_iommu_ctx *ctx = dev;
	u32 fsr, fsynr;
	u64 iova;

	fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

	/* give the client driver a chance to handle the fault; log if not */
	if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
		dev_err_ratelimited(ctx->dev,
				    "Unhandled context fault: fsr=0x%x, "
				    "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
				    fsr, iova, fsynr, ctx->asid);
	}

	/* clear the fault and terminate the stalled transaction */
	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
	iommu_writel(ctx, ARM_SMMU_CB_RESUME, RESUME_TERMINATE);

	return IRQ_HANDLED;
}
210 | ||
211 | static int qcom_iommu_init_domain(struct iommu_domain *domain, | |
212 | struct qcom_iommu_dev *qcom_iommu, | |
213 | struct iommu_fwspec *fwspec) | |
214 | { | |
215 | struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); | |
216 | struct io_pgtable_ops *pgtbl_ops; | |
217 | struct io_pgtable_cfg pgtbl_cfg; | |
218 | int i, ret = 0; | |
219 | u32 reg; | |
220 | ||
221 | mutex_lock(&qcom_domain->init_mutex); | |
222 | if (qcom_domain->iommu) | |
223 | goto out_unlock; | |
224 | ||
225 | pgtbl_cfg = (struct io_pgtable_cfg) { | |
226 | .pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap, | |
227 | .ias = 32, | |
228 | .oas = 40, | |
229 | .tlb = &qcom_gather_ops, | |
230 | .iommu_dev = qcom_iommu->dev, | |
231 | }; | |
232 | ||
233 | qcom_domain->iommu = qcom_iommu; | |
234 | pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, fwspec); | |
235 | if (!pgtbl_ops) { | |
236 | dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n"); | |
237 | ret = -ENOMEM; | |
238 | goto out_clear_iommu; | |
239 | } | |
240 | ||
241 | /* Update the domain's page sizes to reflect the page table format */ | |
242 | domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; | |
243 | domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1; | |
244 | domain->geometry.force_aperture = true; | |
245 | ||
246 | for (i = 0; i < fwspec->num_ids; i++) { | |
247 | struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); | |
248 | ||
249 | if (!ctx->secure_init) { | |
250 | ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid); | |
251 | if (ret) { | |
252 | dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret); | |
253 | goto out_clear_iommu; | |
254 | } | |
255 | ctx->secure_init = true; | |
256 | } | |
257 | ||
258 | /* TTBRs */ | |
259 | iommu_writeq(ctx, ARM_SMMU_CB_TTBR0, | |
260 | pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] | | |
261 | ((u64)ctx->asid << TTBRn_ASID_SHIFT)); | |
262 | iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, | |
263 | pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] | | |
264 | ((u64)ctx->asid << TTBRn_ASID_SHIFT)); | |
265 | ||
266 | /* TTBCR */ | |
267 | iommu_writel(ctx, ARM_SMMU_CB_TTBCR2, | |
268 | (pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) | | |
269 | TTBCR2_SEP_UPSTREAM); | |
270 | iommu_writel(ctx, ARM_SMMU_CB_TTBCR, | |
271 | pgtbl_cfg.arm_lpae_s1_cfg.tcr); | |
272 | ||
273 | /* MAIRs (stage-1 only) */ | |
274 | iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0, | |
275 | pgtbl_cfg.arm_lpae_s1_cfg.mair[0]); | |
276 | iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1, | |
277 | pgtbl_cfg.arm_lpae_s1_cfg.mair[1]); | |
278 | ||
279 | /* SCTLR */ | |
280 | reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | | |
049541e1 | 281 | SCTLR_M | SCTLR_S1_ASIDPNE | SCTLR_CFCFG; |
0ae349a0 RC |
282 | |
283 | if (IS_ENABLED(CONFIG_BIG_ENDIAN)) | |
284 | reg |= SCTLR_E; | |
285 | ||
286 | iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg); | |
049541e1 RC |
287 | |
288 | ctx->domain = domain; | |
0ae349a0 RC |
289 | } |
290 | ||
291 | mutex_unlock(&qcom_domain->init_mutex); | |
292 | ||
293 | /* Publish page table ops for map/unmap */ | |
294 | qcom_domain->pgtbl_ops = pgtbl_ops; | |
295 | ||
296 | return 0; | |
297 | ||
298 | out_clear_iommu: | |
299 | qcom_domain->iommu = NULL; | |
300 | out_unlock: | |
301 | mutex_unlock(&qcom_domain->init_mutex); | |
302 | return ret; | |
303 | } | |
304 | ||
305 | static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type) | |
306 | { | |
307 | struct qcom_iommu_domain *qcom_domain; | |
308 | ||
309 | if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) | |
310 | return NULL; | |
311 | /* | |
312 | * Allocate the domain and initialise some of its data structures. | |
313 | * We can't really do anything meaningful until we've added a | |
314 | * master. | |
315 | */ | |
316 | qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL); | |
317 | if (!qcom_domain) | |
318 | return NULL; | |
319 | ||
320 | if (type == IOMMU_DOMAIN_DMA && | |
321 | iommu_get_dma_cookie(&qcom_domain->domain)) { | |
322 | kfree(qcom_domain); | |
323 | return NULL; | |
324 | } | |
325 | ||
326 | mutex_init(&qcom_domain->init_mutex); | |
327 | spin_lock_init(&qcom_domain->pgtbl_lock); | |
328 | ||
329 | return &qcom_domain->domain; | |
330 | } | |
331 | ||
332 | static void qcom_iommu_domain_free(struct iommu_domain *domain) | |
333 | { | |
334 | struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); | |
335 | ||
336 | if (WARN_ON(qcom_domain->iommu)) /* forgot to detach? */ | |
337 | return; | |
338 | ||
339 | iommu_put_dma_cookie(domain); | |
340 | ||
341 | /* NOTE: unmap can be called after client device is powered off, | |
342 | * for example, with GPUs or anything involving dma-buf. So we | |
343 | * cannot rely on the device_link. Make sure the IOMMU is on to | |
344 | * avoid unclocked accesses in the TLB inv path: | |
345 | */ | |
346 | pm_runtime_get_sync(qcom_domain->iommu->dev); | |
347 | ||
348 | free_io_pgtable_ops(qcom_domain->pgtbl_ops); | |
349 | ||
350 | pm_runtime_put_sync(qcom_domain->iommu->dev); | |
351 | ||
352 | kfree(qcom_domain); | |
353 | } | |
354 | ||
/*
 * Attach a master to a domain, finalizing the domain (page table + context
 * bank programming) on first attach.  Returns 0, -ENXIO if the device is
 * not behind this IOMMU, or -EINVAL if the domain already belongs to a
 * different IOMMU instance.
 */
static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	int ret;

	if (!qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalized */
	/* runtime-pm ref keeps clocks on while context banks are programmed */
	pm_runtime_get_sync(qcom_iommu->dev);
	ret = qcom_iommu_init_domain(domain, qcom_iommu, fwspec);
	pm_runtime_put_sync(qcom_iommu->dev);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different IOMMUs.
	 */
	if (qcom_domain->iommu != qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU %s while already "
			"attached to domain on IOMMU %s\n",
			dev_name(qcom_domain->iommu->dev),
			dev_name(qcom_iommu->dev));
		return -EINVAL;
	}

	return 0;
}
388 | ||
/*
 * Detach a master: disable each context bank it uses and drop the domain's
 * reference to the IOMMU so the domain can be re-attached or freed.
 */
static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	unsigned i;

	/* nothing to do if the domain was never finalized */
	if (!qcom_domain->iommu)
		return;

	/* hold a runtime-pm ref so the register writes below are clocked */
	pm_runtime_get_sync(qcom_iommu->dev);
	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);

		/* Disable the context bank: */
		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

		ctx->domain = NULL;
	}
	pm_runtime_put_sync(qcom_iommu->dev);

	qcom_domain->iommu = NULL;
}
412 | ||
413 | static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, | |
414 | phys_addr_t paddr, size_t size, int prot) | |
415 | { | |
416 | int ret; | |
417 | unsigned long flags; | |
418 | struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); | |
419 | struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops; | |
420 | ||
421 | if (!ops) | |
422 | return -ENODEV; | |
423 | ||
424 | spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); | |
425 | ret = ops->map(ops, iova, paddr, size, prot); | |
426 | spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); | |
427 | return ret; | |
428 | } | |
429 | ||
/*
 * Unmap [iova, iova+size) from the domain's page table.  Returns the number
 * of bytes unmapped (0 if the domain was never finalized).
 */
static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	size_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	/* NOTE: unmap can be called after client device is powered off,
	 * for example, with GPUs or anything involving dma-buf. So we
	 * cannot rely on the device_link. Make sure the IOMMU is on to
	 * avoid unclocked accesses in the TLB inv path:
	 */
	pm_runtime_get_sync(qcom_domain->iommu->dev);
	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	pm_runtime_put_sync(qcom_domain->iommu->dev);

	return ret;
}
454 | ||
4d689b61 RM |
/*
 * Flush/sync callback for the IOMMU core: wait for pending TLB maintenance
 * on all of the domain's context banks.
 */
static void qcom_iommu_iotlb_sync(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	/* container_of is pure pointer arithmetic; pgtable is only
	 * dereferenced after the NULL check below, so computing it from a
	 * possibly-NULL pgtbl_ops first is safe.
	 */
	struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
						  struct io_pgtable, ops);
	if (!qcom_domain->pgtbl_ops)
		return;

	/* keep clocks on for the register polling in tlb_sync */
	pm_runtime_get_sync(qcom_domain->iommu->dev);
	qcom_iommu_tlb_sync(pgtable->cookie);
	pm_runtime_put_sync(qcom_domain->iommu->dev);
}
467 | ||
0ae349a0 RC |
468 | static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain, |
469 | dma_addr_t iova) | |
470 | { | |
471 | phys_addr_t ret; | |
472 | unsigned long flags; | |
473 | struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); | |
474 | struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops; | |
475 | ||
476 | if (!ops) | |
477 | return 0; | |
478 | ||
479 | spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); | |
480 | ret = ops->iova_to_phys(ops, iova); | |
481 | spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); | |
482 | ||
483 | return ret; | |
484 | } | |
485 | ||
486 | static bool qcom_iommu_capable(enum iommu_cap cap) | |
487 | { | |
488 | switch (cap) { | |
489 | case IOMMU_CAP_CACHE_COHERENCY: | |
490 | /* | |
491 | * Return true here as the SMMU can always send out coherent | |
492 | * requests. | |
493 | */ | |
494 | return true; | |
495 | case IOMMU_CAP_NOEXEC: | |
496 | return true; | |
497 | default: | |
498 | return false; | |
499 | } | |
500 | } | |
501 | ||
/*
 * IOMMU core callback when a master device appears: create the runtime-pm
 * device link, join an iommu group, and link the device in sysfs.
 */
static int qcom_iommu_add_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));
	struct iommu_group *group;
	struct device_link *link;

	if (!qcom_iommu)
		return -ENODEV;

	/*
	 * Establish the link between iommu and master, so that the
	 * iommu gets runtime enabled/disabled as per the master's
	 * needs.
	 */
	link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
			dev_name(qcom_iommu->dev), dev_name(dev));
		return -ENODEV;
	}

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR_OR_NULL(group))
		return PTR_ERR_OR_ZERO(group);

	/* group holds its own reference; drop ours */
	iommu_group_put(group);
	iommu_device_link(&qcom_iommu->iommu, dev);

	return 0;
}
532 | ||
/*
 * IOMMU core callback when a master device goes away: undo the sysfs link,
 * group membership and fwspec created at add/xlate time.
 */
static void qcom_iommu_remove_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));

	if (!qcom_iommu)
		return;

	iommu_device_unlink(&qcom_iommu->iommu, dev);
	iommu_group_remove_device(dev);
	iommu_fwspec_free(dev);
}
544 | ||
545 | static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) | |
546 | { | |
2000e5f7 | 547 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
0ae349a0 RC |
548 | struct qcom_iommu_dev *qcom_iommu; |
549 | struct platform_device *iommu_pdev; | |
550 | unsigned asid = args->args[0]; | |
551 | ||
552 | if (args->args_count != 1) { | |
553 | dev_err(dev, "incorrect number of iommu params found for %s " | |
554 | "(found %d, expected 1)\n", | |
555 | args->np->full_name, args->args_count); | |
556 | return -EINVAL; | |
557 | } | |
558 | ||
559 | iommu_pdev = of_find_device_by_node(args->np); | |
560 | if (WARN_ON(!iommu_pdev)) | |
561 | return -EINVAL; | |
562 | ||
563 | qcom_iommu = platform_get_drvdata(iommu_pdev); | |
564 | ||
565 | /* make sure the asid specified in dt is valid, so we don't have | |
566 | * to sanity check this elsewhere, since 'asid - 1' is used to | |
567 | * index into qcom_iommu->ctxs: | |
568 | */ | |
569 | if (WARN_ON(asid < 1) || | |
570 | WARN_ON(asid > qcom_iommu->num_ctxs)) | |
571 | return -EINVAL; | |
572 | ||
2000e5f7 JR |
573 | if (!fwspec->iommu_priv) { |
574 | fwspec->iommu_priv = qcom_iommu; | |
0ae349a0 RC |
575 | } else { |
576 | /* make sure devices iommus dt node isn't referring to | |
577 | * multiple different iommu devices. Multiple context | |
578 | * banks are ok, but multiple devices are not: | |
579 | */ | |
2000e5f7 | 580 | if (WARN_ON(qcom_iommu != fwspec->iommu_priv)) |
0ae349a0 RC |
581 | return -EINVAL; |
582 | } | |
583 | ||
584 | return iommu_fwspec_add_ids(dev, &asid, 1); | |
585 | } | |
586 | ||
/* iommu_ops vtable registered with the IOMMU core and the platform bus. */
static const struct iommu_ops qcom_iommu_ops = {
	.capable	= qcom_iommu_capable,
	.domain_alloc	= qcom_iommu_domain_alloc,
	.domain_free	= qcom_iommu_domain_free,
	.attach_dev	= qcom_iommu_attach_dev,
	.detach_dev	= qcom_iommu_detach_dev,
	.map		= qcom_iommu_map,
	.unmap		= qcom_iommu_unmap,
	/* no separate flush-all path; a full sync serves both purposes */
	.flush_iotlb_all = qcom_iommu_iotlb_sync,
	.iotlb_sync	= qcom_iommu_iotlb_sync,
	.iova_to_phys	= qcom_iommu_iova_to_phys,
	.add_device	= qcom_iommu_add_device,
	.remove_device	= qcom_iommu_remove_device,
	.device_group	= generic_device_group,
	.of_xlate	= qcom_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};
604 | ||
605 | static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu) | |
606 | { | |
607 | int ret; | |
608 | ||
609 | ret = clk_prepare_enable(qcom_iommu->iface_clk); | |
610 | if (ret) { | |
611 | dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n"); | |
612 | return ret; | |
613 | } | |
614 | ||
615 | ret = clk_prepare_enable(qcom_iommu->bus_clk); | |
616 | if (ret) { | |
617 | dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n"); | |
618 | clk_disable_unprepare(qcom_iommu->iface_clk); | |
619 | return ret; | |
620 | } | |
621 | ||
622 | return 0; | |
623 | } | |
624 | ||
/* Disable the clocks in reverse order of qcom_iommu_enable_clocks(). */
static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	clk_disable_unprepare(qcom_iommu->bus_clk);
	clk_disable_unprepare(qcom_iommu->iface_clk);
}
630 | ||
d051f28c SV |
/*
 * One-time allocation of the secure-world pagetable pool: query the
 * required size from SCM, allocate DMA memory for it, and hand it to the
 * secure world.  The 'allocated' static makes this a process-wide one-shot
 * across all SMMU instances.
 */
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
	size_t psize = 0;
	unsigned int spare = 0;
	void *cpu_addr;
	dma_addr_t paddr;
	unsigned long attrs;
	static bool allocated = false;
	int ret;

	if (allocated)
		return 0;

	ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
	if (ret) {
		dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
			ret);
		return ret;
	}

	dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);

	/* only the secure world touches this memory; no kernel mapping needed */
	attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
	if (!cpu_addr) {
		dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
			psize);
		return -ENOMEM;
	}

	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
	if (ret) {
		dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
		goto free_mem;
	}

	/* NOTE: the buffer is intentionally never freed on success — it is
	 * owned by the secure world from here on.
	 */
	allocated = true;
	return 0;

free_mem:
	dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
	return ret;
}
675 | ||
0ae349a0 RC |
/*
 * Derive a context bank's asid from its DT "reg" offset.  Returns the asid
 * or -ENODEV if the property is missing.
 *
 * NOTE(review): a bank at reg offset 0 would compute asid 0, which
 * of_xlate rejects — presumably banks start at offset 0x1000; confirm
 * against the DT bindings.
 */
static int get_asid(const struct device_node *np)
{
	u32 reg;

	/* read the "reg" property directly to get the relative address
	 * of the context bank, and calculate the asid from that:
	 */
	if (of_property_read_u32_index(np, "reg", 0, &reg))
		return -ENODEV;

	return reg / 0x1000;	/* context banks are 0x1000 apart */
}
688 | ||
/*
 * Probe one context-bank child device: map its registers, install the
 * fault handler, compute its asid, and register it with the parent SMMU.
 */
static int qcom_iommu_ctx_probe(struct platform_device *pdev)
{
	struct qcom_iommu_ctx *ctx;
	struct device *dev = &pdev->dev;
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
	struct resource *res;
	int ret, irq;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	platform_set_drvdata(pdev, ctx);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctx->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ctx->base))
		return PTR_ERR(ctx->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "failed to get irq\n");
		return -ENODEV;
	}

	/* clear IRQs before registering fault handler, just in case the
	 * boot-loader left us a surprise:
	 */
	iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));

	/* IRQF_SHARED: several context banks may share one interrupt line */
	ret = devm_request_irq(dev, irq,
			       qcom_iommu_fault,
			       IRQF_SHARED,
			       "qcom-iommu-fault",
			       ctx);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u\n", irq);
		return ret;
	}

	ret = get_asid(dev->of_node);
	if (ret < 0) {
		dev_err(dev, "missing reg property\n");
		return ret;
	}

	ctx->asid = ret;

	dev_dbg(dev, "found asid %u\n", ctx->asid);

	/* make this bank visible to to_ctx() lookups on the parent */
	qcom_iommu->ctxs[ctx->asid - 1] = ctx;

	return 0;
}
744 | ||
745 | static int qcom_iommu_ctx_remove(struct platform_device *pdev) | |
746 | { | |
747 | struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent); | |
748 | struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev); | |
749 | ||
750 | platform_set_drvdata(pdev, NULL); | |
751 | ||
752 | qcom_iommu->ctxs[ctx->asid - 1] = NULL; | |
753 | ||
754 | return 0; | |
755 | } | |
756 | ||
/* DT match table for the per-context-bank child devices. */
static const struct of_device_id ctx_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1-ns" },	/* non-secure bank */
	{ .compatible = "qcom,msm-iommu-v1-sec" },	/* secure bank */
	{ /* sentinel */ }
};

static struct platform_driver qcom_iommu_ctx_driver = {
	.driver	= {
		.name		= "qcom-iommu-ctx",
		.of_match_table	= of_match_ptr(ctx_of_match),
	},
	.probe	= qcom_iommu_ctx_probe,
	.remove	= qcom_iommu_ctx_remove,
};
771 | ||
d051f28c SV |
772 | static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu) |
773 | { | |
774 | struct device_node *child; | |
775 | ||
776 | for_each_child_of_node(qcom_iommu->dev->of_node, child) | |
777 | if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) | |
778 | return true; | |
779 | ||
780 | return false; | |
781 | } | |
782 | ||
0ae349a0 RC |
783 | static int qcom_iommu_device_probe(struct platform_device *pdev) |
784 | { | |
785 | struct device_node *child; | |
786 | struct qcom_iommu_dev *qcom_iommu; | |
787 | struct device *dev = &pdev->dev; | |
788 | struct resource *res; | |
789 | int ret, sz, max_asid = 0; | |
790 | ||
791 | /* find the max asid (which is 1:1 to ctx bank idx), so we know how | |
792 | * many child ctx devices we have: | |
793 | */ | |
794 | for_each_child_of_node(dev->of_node, child) | |
795 | max_asid = max(max_asid, get_asid(child)); | |
796 | ||
797 | sz = sizeof(*qcom_iommu) + (max_asid * sizeof(qcom_iommu->ctxs[0])); | |
798 | ||
799 | qcom_iommu = devm_kzalloc(dev, sz, GFP_KERNEL); | |
800 | if (!qcom_iommu) | |
801 | return -ENOMEM; | |
802 | qcom_iommu->num_ctxs = max_asid; | |
803 | qcom_iommu->dev = dev; | |
804 | ||
805 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
806 | if (res) | |
807 | qcom_iommu->local_base = devm_ioremap_resource(dev, res); | |
808 | ||
809 | qcom_iommu->iface_clk = devm_clk_get(dev, "iface"); | |
810 | if (IS_ERR(qcom_iommu->iface_clk)) { | |
811 | dev_err(dev, "failed to get iface clock\n"); | |
812 | return PTR_ERR(qcom_iommu->iface_clk); | |
813 | } | |
814 | ||
815 | qcom_iommu->bus_clk = devm_clk_get(dev, "bus"); | |
816 | if (IS_ERR(qcom_iommu->bus_clk)) { | |
817 | dev_err(dev, "failed to get bus clock\n"); | |
818 | return PTR_ERR(qcom_iommu->bus_clk); | |
819 | } | |
820 | ||
821 | if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id", | |
822 | &qcom_iommu->sec_id)) { | |
823 | dev_err(dev, "missing qcom,iommu-secure-id property\n"); | |
824 | return -ENODEV; | |
825 | } | |
826 | ||
d051f28c SV |
827 | if (qcom_iommu_has_secure_context(qcom_iommu)) { |
828 | ret = qcom_iommu_sec_ptbl_init(dev); | |
829 | if (ret) { | |
830 | dev_err(dev, "cannot init secure pg table(%d)\n", ret); | |
831 | return ret; | |
832 | } | |
833 | } | |
834 | ||
0ae349a0 RC |
835 | platform_set_drvdata(pdev, qcom_iommu); |
836 | ||
837 | pm_runtime_enable(dev); | |
838 | ||
839 | /* register context bank devices, which are child nodes: */ | |
840 | ret = devm_of_platform_populate(dev); | |
841 | if (ret) { | |
842 | dev_err(dev, "Failed to populate iommu contexts\n"); | |
843 | return ret; | |
844 | } | |
845 | ||
846 | ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL, | |
847 | dev_name(dev)); | |
848 | if (ret) { | |
849 | dev_err(dev, "Failed to register iommu in sysfs\n"); | |
850 | return ret; | |
851 | } | |
852 | ||
853 | iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops); | |
854 | iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode); | |
855 | ||
856 | ret = iommu_device_register(&qcom_iommu->iommu); | |
857 | if (ret) { | |
858 | dev_err(dev, "Failed to register iommu\n"); | |
859 | return ret; | |
860 | } | |
861 | ||
862 | bus_set_iommu(&platform_bus_type, &qcom_iommu_ops); | |
863 | ||
864 | if (qcom_iommu->local_base) { | |
865 | pm_runtime_get_sync(dev); | |
866 | writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS); | |
867 | pm_runtime_put_sync(dev); | |
868 | } | |
869 | ||
870 | return 0; | |
871 | } | |
872 | ||
/* Undo qcom_iommu_device_probe(): unhook from the bus and the IOMMU core. */
static int qcom_iommu_device_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

	bus_set_iommu(&platform_bus_type, NULL);

	pm_runtime_force_suspend(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	iommu_device_sysfs_remove(&qcom_iommu->iommu);
	iommu_device_unregister(&qcom_iommu->iommu);

	return 0;
}
886 | ||
/* Runtime-PM resume: turn the SMMU clocks back on. */
static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	return qcom_iommu_enable_clocks(qcom_iommu);
}

/* Runtime-PM suspend: gate the SMMU clocks. */
static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	qcom_iommu_disable_clocks(qcom_iommu);

	return 0;
}
0ae349a0 RC |
902 | |
/* System sleep reuses the runtime-PM clock handlers via force_suspend/resume. */
static const struct dev_pm_ops qcom_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id qcom_iommu_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1" },
	{ /* sentinel */ }
};

static struct platform_driver qcom_iommu_driver = {
	.driver	= {
		.name		= "qcom-iommu",
		.of_match_table	= of_match_ptr(qcom_iommu_of_match),
		.pm		= &qcom_iommu_pm_ops,
	},
	.probe	= qcom_iommu_device_probe,
	.remove	= qcom_iommu_device_remove,
};
923 | ||
924 | static int __init qcom_iommu_init(void) | |
925 | { | |
926 | int ret; | |
927 | ||
928 | ret = platform_driver_register(&qcom_iommu_ctx_driver); | |
929 | if (ret) | |
930 | return ret; | |
931 | ||
932 | ret = platform_driver_register(&qcom_iommu_driver); | |
933 | if (ret) | |
934 | platform_driver_unregister(&qcom_iommu_ctx_driver); | |
935 | ||
936 | return ret; | |
937 | } | |
f295cf26 | 938 | device_initcall(qcom_iommu_init); |