Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
0ae349a0 RC |
2 | /* |
3 | * IOMMU API for QCOM secure IOMMUs. Somewhat based on arm-smmu.c | |
4 | * | |
0ae349a0 RC |
5 | * Copyright (C) 2013 ARM Limited |
6 | * Copyright (C) 2017 Red Hat | |
7 | */ | |
8 | ||
9 | #include <linux/atomic.h> | |
620565a7 | 10 | #include <linux/bitfield.h> |
0ae349a0 RC |
11 | #include <linux/clk.h> |
12 | #include <linux/delay.h> | |
13 | #include <linux/dma-iommu.h> | |
14 | #include <linux/dma-mapping.h> | |
15 | #include <linux/err.h> | |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/io.h> | |
18 | #include <linux/io-64-nonatomic-hi-lo.h> | |
b77cf11f | 19 | #include <linux/io-pgtable.h> |
0ae349a0 RC |
20 | #include <linux/iommu.h> |
21 | #include <linux/iopoll.h> | |
22 | #include <linux/kconfig.h> | |
f295cf26 | 23 | #include <linux/init.h> |
0ae349a0 RC |
24 | #include <linux/mutex.h> |
25 | #include <linux/of.h> | |
26 | #include <linux/of_address.h> | |
27 | #include <linux/of_device.h> | |
28 | #include <linux/of_iommu.h> | |
29 | #include <linux/platform_device.h> | |
30 | #include <linux/pm.h> | |
31 | #include <linux/pm_runtime.h> | |
32 | #include <linux/qcom_scm.h> | |
33 | #include <linux/slab.h> | |
34 | #include <linux/spinlock.h> | |
35 | ||
c5fc6488 | 36 | #include "arm-smmu.h" |
0ae349a0 RC |
37 | |
38 | #define SMMU_INTR_SEL_NS 0x2000 | |
39 | ||
40 | struct qcom_iommu_ctx; | |
41 | ||
42 | struct qcom_iommu_dev { | |
43 | /* IOMMU core code handle */ | |
44 | struct iommu_device iommu; | |
45 | struct device *dev; | |
46 | struct clk *iface_clk; | |
47 | struct clk *bus_clk; | |
48 | void __iomem *local_base; | |
49 | u32 sec_id; | |
50 | u8 num_ctxs; | |
51 | struct qcom_iommu_ctx *ctxs[0]; /* indexed by asid-1 */ | |
52 | }; | |
53 | ||
/*
 * Per-context-bank state.  Each child node of the SMMU ("-ns" or "-sec"
 * compatible) gets one of these; the bank is addressed via @base.
 */
struct qcom_iommu_ctx {
	struct device *dev;
	void __iomem *base;		/* this bank's register window */
	bool secure_init;		/* SCM restore_sec_cfg already done */
	u8 asid;      /* asid and ctx bank # are 1:1 */
	struct iommu_domain *domain;	/* currently attached domain, or NULL */
};
61 | ||
/*
 * Driver-private domain, wrapping the generic iommu_domain.  The io-pgtable
 * ops/lock live here; @iommu is set once the domain is attached (finalized).
 */
struct qcom_iommu_domain {
	struct io_pgtable_ops	*pgtbl_ops;	/* page-table map/unmap ops */
	spinlock_t		 pgtbl_lock;	/* serializes pgtbl_ops calls */
	struct mutex		 init_mutex;	/* Protects iommu pointer */
	struct iommu_domain	 domain;
	struct qcom_iommu_dev	*iommu;		/* owning SMMU once attached */
};
69 | ||
/* Upcast from the embedded generic domain to the driver-private wrapper. */
static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct qcom_iommu_domain, domain);
}
74 | ||
static const struct iommu_ops qcom_iommu_ops;

/*
 * Resolve the qcom_iommu_dev a fwspec belongs to.  Returns NULL when the
 * fwspec is absent or owned by a different IOMMU driver.
 */
static struct qcom_iommu_dev * to_iommu(struct iommu_fwspec *fwspec)
{
	if (!fwspec || fwspec->ops != &qcom_iommu_ops)
		return NULL;
	return fwspec->iommu_priv;
}
83 | ||
/*
 * Look up a context bank by ASID.  @asid must already be validated
 * (1 <= asid <= num_ctxs); of_xlate() guarantees this for fwspec ids.
 */
static struct qcom_iommu_ctx * to_ctx(struct iommu_fwspec *fwspec, unsigned asid)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
	if (!qcom_iommu)
		return NULL;
	return qcom_iommu->ctxs[asid - 1];	/* ctxs[] is indexed by asid-1 */
}
91 | ||
/*
 * Thin MMIO accessors for a context bank's register window.  The relaxed
 * variants are used deliberately: ordering against DMA is handled by the
 * callers (e.g. explicit TLB sync polling).
 */
static inline void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
	writel_relaxed(val, ctx->base + reg);
}

static inline void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
	writeq_relaxed(val, ctx->base + reg);
}

static inline u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readl_relaxed(ctx->base + reg);
}

static inline u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readq_relaxed(ctx->base + reg);
}
115 | ||
/*
 * Wait for outstanding TLB invalidations to complete on every context bank
 * in the fwspec.  @cookie is the iommu_fwspec installed as io-pgtable cookie.
 */
static void qcom_iommu_tlb_sync(void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		unsigned int val, ret;

		/* Kick off the sync, then poll TLBSTATUS until it goes idle */
		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);

		/* 5s timeout; bit 0 of TLBSTATUS clears when the sync is done */
		ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
					 (val & 0x1) == 0, 0, 5000000);
		if (ret)
			dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
	}
}
133 | ||
/*
 * Invalidate all TLB entries for each context bank's ASID, then wait for
 * completion.  Used as the io-pgtable tlb_flush_all callback.
 */
static void qcom_iommu_tlb_inv_context(void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
	}

	qcom_iommu_tlb_sync(cookie);
}
146 | ||
/*
 * Issue per-page TLB invalidations for [iova, iova+size) on every context
 * bank, without waiting for completion (callers follow up with tlb_sync).
 * @leaf selects last-level-only invalidation (TLBIVAL vs TLBIVA).
 *
 * NOTE(review): the do/while assumes @size is a non-zero multiple of
 * @granule — if not, "s -= granule" never reaches zero.  The io-pgtable
 * layer appears to guarantee this; confirm before relying on it.
 */
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					    size_t granule, bool leaf, void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i, reg;

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		size_t s = size;

		/* TLBIVA(L) register format: address bits [31:12] + ASID in
		 * the low bits, so mask the iova to 4K and OR in the asid:
		 */
		iova = (iova >> 12) << 12;
		iova |= ctx->asid;
		do {
			iommu_writel(ctx, reg, iova);
			iova += granule;
		} while (s -= granule);
	}
}
167 | ||
05aed941 WD |
168 | static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size, |
169 | size_t granule, void *cookie) | |
170 | { | |
171 | qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie); | |
172 | qcom_iommu_tlb_sync(cookie); | |
173 | } | |
174 | ||
175 | static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size, | |
176 | size_t granule, void *cookie) | |
177 | { | |
178 | qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie); | |
179 | qcom_iommu_tlb_sync(cookie); | |
180 | } | |
181 | ||
3951c41a WD |
182 | static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, |
183 | unsigned long iova, size_t granule, | |
abfd6fe0 WD |
184 | void *cookie) |
185 | { | |
186 | qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie); | |
187 | } | |
188 | ||
298f7889 | 189 | static const struct iommu_flush_ops qcom_flush_ops = { |
0ae349a0 | 190 | .tlb_flush_all = qcom_iommu_tlb_inv_context, |
05aed941 WD |
191 | .tlb_flush_walk = qcom_iommu_tlb_flush_walk, |
192 | .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf, | |
abfd6fe0 | 193 | .tlb_add_page = qcom_iommu_tlb_add_page, |
0ae349a0 RC |
194 | }; |
195 | ||
/*
 * Context-bank fault IRQ handler.  Reads the fault status/syndrome/address
 * registers, reports the fault to any registered handler, then clears the
 * fault and terminates the stalled transaction.
 */
static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
	struct qcom_iommu_ctx *ctx = dev;
	u32 fsr, fsynr;
	u64 iova;

	fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

	/* Shared IRQ: not ours if no fault bit is set */
	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

	/* Only log if no domain fault handler consumed the event */
	if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
		dev_err_ratelimited(ctx->dev,
				    "Unhandled context fault: fsr=0x%x, "
				    "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
				    fsr, iova, fsynr, ctx->asid);
	}

	/* Ack the fault (write-1-to-clear) and resume with termination */
	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
	iommu_writel(ctx, ARM_SMMU_CB_RESUME, RESUME_TERMINATE);

	return IRQ_HANDLED;
}
222 | ||
223 | static int qcom_iommu_init_domain(struct iommu_domain *domain, | |
224 | struct qcom_iommu_dev *qcom_iommu, | |
225 | struct iommu_fwspec *fwspec) | |
226 | { | |
227 | struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); | |
228 | struct io_pgtable_ops *pgtbl_ops; | |
229 | struct io_pgtable_cfg pgtbl_cfg; | |
230 | int i, ret = 0; | |
231 | u32 reg; | |
232 | ||
233 | mutex_lock(&qcom_domain->init_mutex); | |
234 | if (qcom_domain->iommu) | |
235 | goto out_unlock; | |
236 | ||
237 | pgtbl_cfg = (struct io_pgtable_cfg) { | |
238 | .pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap, | |
239 | .ias = 32, | |
240 | .oas = 40, | |
298f7889 | 241 | .tlb = &qcom_flush_ops, |
0ae349a0 RC |
242 | .iommu_dev = qcom_iommu->dev, |
243 | }; | |
244 | ||
245 | qcom_domain->iommu = qcom_iommu; | |
246 | pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, fwspec); | |
247 | if (!pgtbl_ops) { | |
248 | dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n"); | |
249 | ret = -ENOMEM; | |
250 | goto out_clear_iommu; | |
251 | } | |
252 | ||
253 | /* Update the domain's page sizes to reflect the page table format */ | |
254 | domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; | |
255 | domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1; | |
256 | domain->geometry.force_aperture = true; | |
257 | ||
258 | for (i = 0; i < fwspec->num_ids; i++) { | |
259 | struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); | |
260 | ||
261 | if (!ctx->secure_init) { | |
262 | ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid); | |
263 | if (ret) { | |
264 | dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret); | |
265 | goto out_clear_iommu; | |
266 | } | |
267 | ctx->secure_init = true; | |
268 | } | |
269 | ||
270 | /* TTBRs */ | |
271 | iommu_writeq(ctx, ARM_SMMU_CB_TTBR0, | |
272 | pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] | | |
620565a7 | 273 | FIELD_PREP(TTBRn_ASID, ctx->asid)); |
0ae349a0 RC |
274 | iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, |
275 | pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] | | |
620565a7 | 276 | FIELD_PREP(TTBRn_ASID, ctx->asid)); |
0ae349a0 | 277 | |
620565a7 RM |
278 | /* TCR */ |
279 | iommu_writel(ctx, ARM_SMMU_CB_TCR2, | |
0ae349a0 | 280 | (pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) | |
620565a7 RM |
281 | FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM)); |
282 | iommu_writel(ctx, ARM_SMMU_CB_TCR, | |
0ae349a0 RC |
283 | pgtbl_cfg.arm_lpae_s1_cfg.tcr); |
284 | ||
285 | /* MAIRs (stage-1 only) */ | |
286 | iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0, | |
287 | pgtbl_cfg.arm_lpae_s1_cfg.mair[0]); | |
288 | iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1, | |
289 | pgtbl_cfg.arm_lpae_s1_cfg.mair[1]); | |
290 | ||
291 | /* SCTLR */ | |
292 | reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | | |
049541e1 | 293 | SCTLR_M | SCTLR_S1_ASIDPNE | SCTLR_CFCFG; |
0ae349a0 RC |
294 | |
295 | if (IS_ENABLED(CONFIG_BIG_ENDIAN)) | |
296 | reg |= SCTLR_E; | |
297 | ||
298 | iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg); | |
049541e1 RC |
299 | |
300 | ctx->domain = domain; | |
0ae349a0 RC |
301 | } |
302 | ||
303 | mutex_unlock(&qcom_domain->init_mutex); | |
304 | ||
305 | /* Publish page table ops for map/unmap */ | |
306 | qcom_domain->pgtbl_ops = pgtbl_ops; | |
307 | ||
308 | return 0; | |
309 | ||
310 | out_clear_iommu: | |
311 | qcom_domain->iommu = NULL; | |
312 | out_unlock: | |
313 | mutex_unlock(&qcom_domain->init_mutex); | |
314 | return ret; | |
315 | } | |
316 | ||
317 | static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type) | |
318 | { | |
319 | struct qcom_iommu_domain *qcom_domain; | |
320 | ||
321 | if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) | |
322 | return NULL; | |
323 | /* | |
324 | * Allocate the domain and initialise some of its data structures. | |
325 | * We can't really do anything meaningful until we've added a | |
326 | * master. | |
327 | */ | |
328 | qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL); | |
329 | if (!qcom_domain) | |
330 | return NULL; | |
331 | ||
332 | if (type == IOMMU_DOMAIN_DMA && | |
333 | iommu_get_dma_cookie(&qcom_domain->domain)) { | |
334 | kfree(qcom_domain); | |
335 | return NULL; | |
336 | } | |
337 | ||
338 | mutex_init(&qcom_domain->init_mutex); | |
339 | spin_lock_init(&qcom_domain->pgtbl_lock); | |
340 | ||
341 | return &qcom_domain->domain; | |
342 | } | |
343 | ||
344 | static void qcom_iommu_domain_free(struct iommu_domain *domain) | |
345 | { | |
346 | struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); | |
347 | ||
348 | if (WARN_ON(qcom_domain->iommu)) /* forgot to detach? */ | |
349 | return; | |
350 | ||
351 | iommu_put_dma_cookie(domain); | |
352 | ||
353 | /* NOTE: unmap can be called after client device is powered off, | |
354 | * for example, with GPUs or anything involving dma-buf. So we | |
355 | * cannot rely on the device_link. Make sure the IOMMU is on to | |
356 | * avoid unclocked accesses in the TLB inv path: | |
357 | */ | |
358 | pm_runtime_get_sync(qcom_domain->iommu->dev); | |
359 | ||
360 | free_io_pgtable_ops(qcom_domain->pgtbl_ops); | |
361 | ||
362 | pm_runtime_put_sync(qcom_domain->iommu->dev); | |
363 | ||
364 | kfree(qcom_domain); | |
365 | } | |
366 | ||
/*
 * Attach a device to a domain.  Finalizes the domain (page tables + context
 * bank programming) on first attach; rejects devices from a different SMMU
 * instance than the one the domain was finalized against.
 */
static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	int ret;

	if (!qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalized */
	/* Runtime-PM ref held across register programming in init_domain */
	pm_runtime_get_sync(qcom_iommu->dev);
	ret = qcom_iommu_init_domain(domain, qcom_iommu, fwspec);
	pm_runtime_put_sync(qcom_iommu->dev);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different IOMMUs.
	 */
	if (qcom_domain->iommu != qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU %s while already "
			"attached to domain on IOMMU %s\n",
			dev_name(qcom_domain->iommu->dev),
			dev_name(qcom_iommu->dev));
		return -EINVAL;
	}

	return 0;
}
400 | ||
/*
 * Detach a device: disable translation on each of its context banks and
 * dissociate the domain from the SMMU instance.
 */
static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	unsigned i;

	/* Nothing to do if the domain was never finalized */
	if (!qcom_domain->iommu)
		return;

	pm_runtime_get_sync(qcom_iommu->dev);
	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);

		/* Disable the context bank: */
		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

		ctx->domain = NULL;
	}
	pm_runtime_put_sync(qcom_iommu->dev);

	qcom_domain->iommu = NULL;
}
424 | ||
/*
 * Map @size bytes at @iova -> @paddr with @prot.  Pure page-table update
 * (no TLB maintenance needed for new mappings); returns -ENODEV if the
 * domain has not been finalized yet.
 */
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	/* pgtbl_lock serializes all io-pgtable operations on this domain */
	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	return ret;
}
441 | ||
/*
 * Unmap up to @size bytes at @iova; returns the number of bytes actually
 * unmapped (0 if the domain was never finalized).  TLB invalidations are
 * gathered into @gather and drained later via iotlb_sync().
 */
static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	size_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	/* NOTE: unmap can be called after client device is powered off,
	 * for example, with GPUs or anything involving dma-buf. So we
	 * cannot rely on the device_link. Make sure the IOMMU is on to
	 * avoid unclocked accesses in the TLB inv path:
	 */
	pm_runtime_get_sync(qcom_domain->iommu->dev);
	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size, gather);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	pm_runtime_put_sync(qcom_domain->iommu->dev);

	return ret;
}
466 | ||
56f8af5e | 467 | static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain) |
4d689b61 RM |
468 | { |
469 | struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); | |
470 | struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops, | |
471 | struct io_pgtable, ops); | |
472 | if (!qcom_domain->pgtbl_ops) | |
473 | return; | |
474 | ||
475 | pm_runtime_get_sync(qcom_domain->iommu->dev); | |
476 | qcom_iommu_tlb_sync(pgtable->cookie); | |
477 | pm_runtime_put_sync(qcom_domain->iommu->dev); | |
478 | } | |
479 | ||
56f8af5e WD |
480 | static void qcom_iommu_iotlb_sync(struct iommu_domain *domain, |
481 | struct iommu_iotlb_gather *gather) | |
482 | { | |
483 | qcom_iommu_flush_iotlb_all(domain); | |
484 | } | |
485 | ||
0ae349a0 RC |
486 | static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain, |
487 | dma_addr_t iova) | |
488 | { | |
489 | phys_addr_t ret; | |
490 | unsigned long flags; | |
491 | struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); | |
492 | struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops; | |
493 | ||
494 | if (!ops) | |
495 | return 0; | |
496 | ||
497 | spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); | |
498 | ret = ops->iova_to_phys(ops, iova); | |
499 | spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); | |
500 | ||
501 | return ret; | |
502 | } | |
503 | ||
504 | static bool qcom_iommu_capable(enum iommu_cap cap) | |
505 | { | |
506 | switch (cap) { | |
507 | case IOMMU_CAP_CACHE_COHERENCY: | |
508 | /* | |
509 | * Return true here as the SMMU can always send out coherent | |
510 | * requests. | |
511 | */ | |
512 | return true; | |
513 | case IOMMU_CAP_NOEXEC: | |
514 | return true; | |
515 | default: | |
516 | return false; | |
517 | } | |
518 | } | |
519 | ||
/*
 * iommu_ops add_device hook: create the runtime-PM device link from master
 * to IOMMU, join/create the device's group, and link it in sysfs.
 */
static int qcom_iommu_add_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));
	struct iommu_group *group;
	struct device_link *link;

	if (!qcom_iommu)
		return -ENODEV;

	/*
	 * Establish the link between iommu and master, so that the
	 * iommu gets runtime enabled/disabled as per the master's
	 * needs.
	 */
	link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
			dev_name(qcom_iommu->dev), dev_name(dev));
		return -ENODEV;
	}

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR_OR_NULL(group))
		return PTR_ERR_OR_ZERO(group);

	/* get_for_dev took a reference we don't need to keep */
	iommu_group_put(group);
	iommu_device_link(&qcom_iommu->iommu, dev);

	return 0;
}
550 | ||
/* iommu_ops remove_device hook: undo add_device's sysfs/group bookkeeping. */
static void qcom_iommu_remove_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));

	if (!qcom_iommu)
		return;

	iommu_device_unlink(&qcom_iommu->iommu, dev);
	iommu_group_remove_device(dev);
	iommu_fwspec_free(dev);
}
562 | ||
563 | static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) | |
564 | { | |
2000e5f7 | 565 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
0ae349a0 RC |
566 | struct qcom_iommu_dev *qcom_iommu; |
567 | struct platform_device *iommu_pdev; | |
568 | unsigned asid = args->args[0]; | |
569 | ||
570 | if (args->args_count != 1) { | |
571 | dev_err(dev, "incorrect number of iommu params found for %s " | |
572 | "(found %d, expected 1)\n", | |
573 | args->np->full_name, args->args_count); | |
574 | return -EINVAL; | |
575 | } | |
576 | ||
577 | iommu_pdev = of_find_device_by_node(args->np); | |
578 | if (WARN_ON(!iommu_pdev)) | |
579 | return -EINVAL; | |
580 | ||
581 | qcom_iommu = platform_get_drvdata(iommu_pdev); | |
582 | ||
583 | /* make sure the asid specified in dt is valid, so we don't have | |
584 | * to sanity check this elsewhere, since 'asid - 1' is used to | |
585 | * index into qcom_iommu->ctxs: | |
586 | */ | |
587 | if (WARN_ON(asid < 1) || | |
588 | WARN_ON(asid > qcom_iommu->num_ctxs)) | |
589 | return -EINVAL; | |
590 | ||
2000e5f7 JR |
591 | if (!fwspec->iommu_priv) { |
592 | fwspec->iommu_priv = qcom_iommu; | |
0ae349a0 RC |
593 | } else { |
594 | /* make sure devices iommus dt node isn't referring to | |
595 | * multiple different iommu devices. Multiple context | |
596 | * banks are ok, but multiple devices are not: | |
597 | */ | |
2000e5f7 | 598 | if (WARN_ON(qcom_iommu != fwspec->iommu_priv)) |
0ae349a0 RC |
599 | return -EINVAL; |
600 | } | |
601 | ||
602 | return iommu_fwspec_add_ids(dev, &asid, 1); | |
603 | } | |
604 | ||
/* IOMMU core callback table for this driver. */
static const struct iommu_ops qcom_iommu_ops = {
	.capable	= qcom_iommu_capable,
	.domain_alloc	= qcom_iommu_domain_alloc,
	.domain_free	= qcom_iommu_domain_free,
	.attach_dev	= qcom_iommu_attach_dev,
	.detach_dev	= qcom_iommu_detach_dev,
	.map		= qcom_iommu_map,
	.unmap		= qcom_iommu_unmap,
	.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
	.iotlb_sync	= qcom_iommu_iotlb_sync,
	.iova_to_phys	= qcom_iommu_iova_to_phys,
	.add_device	= qcom_iommu_add_device,
	.remove_device	= qcom_iommu_remove_device,
	.device_group	= generic_device_group,
	.of_xlate	= qcom_iommu_of_xlate,
	/* supported page sizes of the ARM 32-bit LPAE stage-1 format */
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};
622 | ||
/*
 * Enable the interface and bus clocks, in that order; on partial failure
 * the already-enabled clock is rolled back.  Returns 0 or a clk error.
 */
static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	int ret;

	ret = clk_prepare_enable(qcom_iommu->iface_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n");
		return ret;
	}

	ret = clk_prepare_enable(qcom_iommu->bus_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n");
		clk_disable_unprepare(qcom_iommu->iface_clk);
		return ret;
	}

	return 0;
}

/* Disable clocks in reverse order of qcom_iommu_enable_clocks(). */
static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	clk_disable_unprepare(qcom_iommu->bus_clk);
	clk_disable_unprepare(qcom_iommu->iface_clk);
}
648 | ||
d051f28c SV |
649 | static int qcom_iommu_sec_ptbl_init(struct device *dev) |
650 | { | |
651 | size_t psize = 0; | |
652 | unsigned int spare = 0; | |
653 | void *cpu_addr; | |
654 | dma_addr_t paddr; | |
655 | unsigned long attrs; | |
656 | static bool allocated = false; | |
657 | int ret; | |
658 | ||
659 | if (allocated) | |
660 | return 0; | |
661 | ||
662 | ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize); | |
663 | if (ret) { | |
664 | dev_err(dev, "failed to get iommu secure pgtable size (%d)\n", | |
665 | ret); | |
666 | return ret; | |
667 | } | |
668 | ||
669 | dev_info(dev, "iommu sec: pgtable size: %zu\n", psize); | |
670 | ||
671 | attrs = DMA_ATTR_NO_KERNEL_MAPPING; | |
672 | ||
673 | cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs); | |
674 | if (!cpu_addr) { | |
675 | dev_err(dev, "failed to allocate %zu bytes for pgtable\n", | |
676 | psize); | |
677 | return -ENOMEM; | |
678 | } | |
679 | ||
680 | ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare); | |
681 | if (ret) { | |
682 | dev_err(dev, "failed to init iommu pgtable (%d)\n", ret); | |
683 | goto free_mem; | |
684 | } | |
685 | ||
686 | allocated = true; | |
687 | return 0; | |
688 | ||
689 | free_mem: | |
690 | dma_free_attrs(dev, psize, cpu_addr, paddr, attrs); | |
691 | return ret; | |
692 | } | |
693 | ||
/*
 * Derive a context bank's ASID from its DT "reg" offset.  Returns the
 * (1-based) ASID, or -ENODEV if the node has no "reg" property.
 */
static int get_asid(const struct device_node *np)
{
	u32 reg;

	/* read the "reg" property directly to get the relative address
	 * of the context bank, and calculate the asid from that:
	 */
	if (of_property_read_u32_index(np, "reg", 0, &reg))
		return -ENODEV;

	return reg / 0x1000;	/* context banks are 0x1000 apart */
}
706 | ||
707 | static int qcom_iommu_ctx_probe(struct platform_device *pdev) | |
708 | { | |
709 | struct qcom_iommu_ctx *ctx; | |
710 | struct device *dev = &pdev->dev; | |
711 | struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent); | |
712 | struct resource *res; | |
713 | int ret, irq; | |
714 | ||
715 | ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); | |
716 | if (!ctx) | |
717 | return -ENOMEM; | |
718 | ||
719 | ctx->dev = dev; | |
720 | platform_set_drvdata(pdev, ctx); | |
721 | ||
722 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
723 | ctx->base = devm_ioremap_resource(dev, res); | |
724 | if (IS_ERR(ctx->base)) | |
725 | return PTR_ERR(ctx->base); | |
726 | ||
727 | irq = platform_get_irq(pdev, 0); | |
086f9efa | 728 | if (irq < 0) |
0ae349a0 | 729 | return -ENODEV; |
0ae349a0 RC |
730 | |
731 | /* clear IRQs before registering fault handler, just in case the | |
732 | * boot-loader left us a surprise: | |
733 | */ | |
734 | iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR)); | |
735 | ||
736 | ret = devm_request_irq(dev, irq, | |
737 | qcom_iommu_fault, | |
738 | IRQF_SHARED, | |
739 | "qcom-iommu-fault", | |
740 | ctx); | |
741 | if (ret) { | |
742 | dev_err(dev, "failed to request IRQ %u\n", irq); | |
743 | return ret; | |
744 | } | |
745 | ||
746 | ret = get_asid(dev->of_node); | |
747 | if (ret < 0) { | |
748 | dev_err(dev, "missing reg property\n"); | |
749 | return ret; | |
750 | } | |
751 | ||
752 | ctx->asid = ret; | |
753 | ||
754 | dev_dbg(dev, "found asid %u\n", ctx->asid); | |
755 | ||
756 | qcom_iommu->ctxs[ctx->asid - 1] = ctx; | |
757 | ||
758 | return 0; | |
759 | } | |
760 | ||
/* Remove a context-bank device: unpublish it from the parent's ctxs[]. */
static int qcom_iommu_ctx_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
	struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	qcom_iommu->ctxs[ctx->asid - 1] = NULL;

	return 0;
}
772 | ||
/* DT match table for context banks: non-secure and secure variants. */
static const struct of_device_id ctx_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1-ns" },
	{ .compatible = "qcom,msm-iommu-v1-sec" },
	{ /* sentinel */ }
};

/* Platform driver for the per-context-bank child devices. */
static struct platform_driver qcom_iommu_ctx_driver = {
	.driver	= {
		.name		= "qcom-iommu-ctx",
		.of_match_table	= of_match_ptr(ctx_of_match),
	},
	.probe	= qcom_iommu_ctx_probe,
	.remove	= qcom_iommu_ctx_remove,
};
787 | ||
d051f28c SV |
788 | static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu) |
789 | { | |
790 | struct device_node *child; | |
791 | ||
792 | for_each_child_of_node(qcom_iommu->dev->of_node, child) | |
793 | if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) | |
794 | return true; | |
795 | ||
796 | return false; | |
797 | } | |
798 | ||
0ae349a0 RC |
799 | static int qcom_iommu_device_probe(struct platform_device *pdev) |
800 | { | |
801 | struct device_node *child; | |
802 | struct qcom_iommu_dev *qcom_iommu; | |
803 | struct device *dev = &pdev->dev; | |
804 | struct resource *res; | |
87585537 | 805 | int ret, max_asid = 0; |
0ae349a0 RC |
806 | |
807 | /* find the max asid (which is 1:1 to ctx bank idx), so we know how | |
808 | * many child ctx devices we have: | |
809 | */ | |
810 | for_each_child_of_node(dev->of_node, child) | |
811 | max_asid = max(max_asid, get_asid(child)); | |
812 | ||
87585537 GS |
813 | qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid), |
814 | GFP_KERNEL); | |
0ae349a0 RC |
815 | if (!qcom_iommu) |
816 | return -ENOMEM; | |
817 | qcom_iommu->num_ctxs = max_asid; | |
818 | qcom_iommu->dev = dev; | |
819 | ||
820 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
821 | if (res) | |
822 | qcom_iommu->local_base = devm_ioremap_resource(dev, res); | |
823 | ||
824 | qcom_iommu->iface_clk = devm_clk_get(dev, "iface"); | |
825 | if (IS_ERR(qcom_iommu->iface_clk)) { | |
826 | dev_err(dev, "failed to get iface clock\n"); | |
827 | return PTR_ERR(qcom_iommu->iface_clk); | |
828 | } | |
829 | ||
830 | qcom_iommu->bus_clk = devm_clk_get(dev, "bus"); | |
831 | if (IS_ERR(qcom_iommu->bus_clk)) { | |
832 | dev_err(dev, "failed to get bus clock\n"); | |
833 | return PTR_ERR(qcom_iommu->bus_clk); | |
834 | } | |
835 | ||
836 | if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id", | |
837 | &qcom_iommu->sec_id)) { | |
838 | dev_err(dev, "missing qcom,iommu-secure-id property\n"); | |
839 | return -ENODEV; | |
840 | } | |
841 | ||
d051f28c SV |
842 | if (qcom_iommu_has_secure_context(qcom_iommu)) { |
843 | ret = qcom_iommu_sec_ptbl_init(dev); | |
844 | if (ret) { | |
845 | dev_err(dev, "cannot init secure pg table(%d)\n", ret); | |
846 | return ret; | |
847 | } | |
848 | } | |
849 | ||
0ae349a0 RC |
850 | platform_set_drvdata(pdev, qcom_iommu); |
851 | ||
852 | pm_runtime_enable(dev); | |
853 | ||
854 | /* register context bank devices, which are child nodes: */ | |
855 | ret = devm_of_platform_populate(dev); | |
856 | if (ret) { | |
857 | dev_err(dev, "Failed to populate iommu contexts\n"); | |
858 | return ret; | |
859 | } | |
860 | ||
861 | ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL, | |
862 | dev_name(dev)); | |
863 | if (ret) { | |
864 | dev_err(dev, "Failed to register iommu in sysfs\n"); | |
865 | return ret; | |
866 | } | |
867 | ||
868 | iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops); | |
869 | iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode); | |
870 | ||
871 | ret = iommu_device_register(&qcom_iommu->iommu); | |
872 | if (ret) { | |
873 | dev_err(dev, "Failed to register iommu\n"); | |
874 | return ret; | |
875 | } | |
876 | ||
877 | bus_set_iommu(&platform_bus_type, &qcom_iommu_ops); | |
878 | ||
879 | if (qcom_iommu->local_base) { | |
880 | pm_runtime_get_sync(dev); | |
881 | writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS); | |
882 | pm_runtime_put_sync(dev); | |
883 | } | |
884 | ||
885 | return 0; | |
886 | } | |
887 | ||
/* Tear down the SMMU instance registered in qcom_iommu_device_probe(). */
static int qcom_iommu_device_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

	bus_set_iommu(&platform_bus_type, NULL);

	pm_runtime_force_suspend(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	iommu_device_sysfs_remove(&qcom_iommu->iommu);
	iommu_device_unregister(&qcom_iommu->iommu);

	return 0;
}
901 | ||
6ce5b0f2 | 902 | static int __maybe_unused qcom_iommu_resume(struct device *dev) |
0ae349a0 | 903 | { |
7d1bf14f | 904 | struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev); |
0ae349a0 RC |
905 | |
906 | return qcom_iommu_enable_clocks(qcom_iommu); | |
907 | } | |
908 | ||
6ce5b0f2 | 909 | static int __maybe_unused qcom_iommu_suspend(struct device *dev) |
0ae349a0 | 910 | { |
7d1bf14f | 911 | struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev); |
0ae349a0 RC |
912 | |
913 | qcom_iommu_disable_clocks(qcom_iommu); | |
914 | ||
915 | return 0; | |
916 | } | |
0ae349a0 RC |
917 | |
918 | static const struct dev_pm_ops qcom_iommu_pm_ops = { | |
919 | SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL) | |
920 | SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, | |
921 | pm_runtime_force_resume) | |
922 | }; | |
923 | ||
924 | static const struct of_device_id qcom_iommu_of_match[] = { | |
925 | { .compatible = "qcom,msm-iommu-v1" }, | |
926 | { /* sentinel */ } | |
927 | }; | |
0ae349a0 RC |
928 | |
929 | static struct platform_driver qcom_iommu_driver = { | |
930 | .driver = { | |
931 | .name = "qcom-iommu", | |
932 | .of_match_table = of_match_ptr(qcom_iommu_of_match), | |
933 | .pm = &qcom_iommu_pm_ops, | |
934 | }, | |
935 | .probe = qcom_iommu_device_probe, | |
936 | .remove = qcom_iommu_device_remove, | |
937 | }; | |
938 | ||
/*
 * Module init: register the context-bank driver first so the child devices
 * created by devm_of_platform_populate() in the parent's probe can bind;
 * unwind it if the parent driver fails to register.
 */
static int __init qcom_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&qcom_iommu_ctx_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&qcom_iommu_driver);
	if (ret)
		platform_driver_unregister(&qcom_iommu_ctx_driver);

	return ret;
}
device_initcall(qcom_iommu_init);