Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
0ae349a0 RC |
2 | /* |
3 | * IOMMU API for QCOM secure IOMMUs. Somewhat based on arm-smmu.c | |
4 | * | |
0ae349a0 RC |
5 | * Copyright (C) 2013 ARM Limited |
6 | * Copyright (C) 2017 Red Hat | |
7 | */ | |
8 | ||
9 | #include <linux/atomic.h> | |
620565a7 | 10 | #include <linux/bitfield.h> |
0ae349a0 RC |
11 | #include <linux/clk.h> |
12 | #include <linux/delay.h> | |
13 | #include <linux/dma-iommu.h> | |
14 | #include <linux/dma-mapping.h> | |
15 | #include <linux/err.h> | |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/io.h> | |
18 | #include <linux/io-64-nonatomic-hi-lo.h> | |
b77cf11f | 19 | #include <linux/io-pgtable.h> |
0ae349a0 RC |
20 | #include <linux/iommu.h> |
21 | #include <linux/iopoll.h> | |
22 | #include <linux/kconfig.h> | |
f295cf26 | 23 | #include <linux/init.h> |
0ae349a0 RC |
24 | #include <linux/mutex.h> |
25 | #include <linux/of.h> | |
26 | #include <linux/of_address.h> | |
27 | #include <linux/of_device.h> | |
28 | #include <linux/of_iommu.h> | |
29 | #include <linux/platform_device.h> | |
30 | #include <linux/pm.h> | |
31 | #include <linux/pm_runtime.h> | |
32 | #include <linux/qcom_scm.h> | |
33 | #include <linux/slab.h> | |
34 | #include <linux/spinlock.h> | |
35 | ||
c5fc6488 | 36 | #include "arm-smmu.h" |
0ae349a0 RC |
37 | |
38 | #define SMMU_INTR_SEL_NS 0x2000 | |
39 | ||
40 | struct qcom_iommu_ctx; | |
41 | ||
42 | struct qcom_iommu_dev { | |
43 | /* IOMMU core code handle */ | |
44 | struct iommu_device iommu; | |
45 | struct device *dev; | |
46 | struct clk *iface_clk; | |
47 | struct clk *bus_clk; | |
48 | void __iomem *local_base; | |
49 | u32 sec_id; | |
50 | u8 num_ctxs; | |
51 | struct qcom_iommu_ctx *ctxs[0]; /* indexed by asid-1 */ | |
52 | }; | |
53 | ||
/*
 * Per-context-bank state.  Each context bank is a child platform device of
 * the qcom_iommu_dev, and 'asid - 1' indexes the parent's ctxs[] array.
 */
struct qcom_iommu_ctx {
	struct device *dev;
	void __iomem *base;		/* MMIO base of this context bank */
	bool secure_init;		/* qcom_scm_restore_sec_cfg() done */
	u8 asid;      /* asid and ctx bank # are 1:1 */
	struct iommu_domain *domain;	/* attached domain, or NULL */
};

/* Driver-private domain state backing one iommu_domain. */
struct qcom_iommu_domain {
	struct io_pgtable_ops *pgtbl_ops;	/* set once finalized */
	spinlock_t pgtbl_lock;		/* serializes pagetable ops */
	struct mutex init_mutex; /* Protects iommu pointer */
	struct iommu_domain domain;
	struct qcom_iommu_dev *iommu;	/* owning instance, NULL until attach */
};
69 | ||
/* Container-of helper: core iommu_domain -> driver-private domain. */
static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct qcom_iommu_domain, domain);
}

static const struct iommu_ops qcom_iommu_ops;
76 | ||
77 | static struct qcom_iommu_dev * to_iommu(struct iommu_fwspec *fwspec) | |
78 | { | |
79 | if (!fwspec || fwspec->ops != &qcom_iommu_ops) | |
80 | return NULL; | |
81 | return fwspec->iommu_priv; | |
82 | } | |
83 | ||
84 | static struct qcom_iommu_ctx * to_ctx(struct iommu_fwspec *fwspec, unsigned asid) | |
85 | { | |
86 | struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec); | |
87 | if (!qcom_iommu) | |
88 | return NULL; | |
89 | return qcom_iommu->ctxs[asid - 1]; | |
90 | } | |
91 | ||
/* Relaxed MMIO accessors into a context bank's register window. */

static inline void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
	writel_relaxed(val, ctx->base + reg);
}

static inline void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
	writeq_relaxed(val, ctx->base + reg);
}

static inline u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readl_relaxed(ctx->base + reg);
}

static inline u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readq_relaxed(ctx->base + reg);
}
115 | ||
116 | static void qcom_iommu_tlb_sync(void *cookie) | |
117 | { | |
118 | struct iommu_fwspec *fwspec = cookie; | |
119 | unsigned i; | |
120 | ||
121 | for (i = 0; i < fwspec->num_ids; i++) { | |
122 | struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); | |
123 | unsigned int val, ret; | |
124 | ||
125 | iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0); | |
126 | ||
127 | ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val, | |
128 | (val & 0x1) == 0, 0, 5000000); | |
129 | if (ret) | |
130 | dev_err(ctx->dev, "timeout waiting for TLB SYNC\n"); | |
131 | } | |
132 | } | |
133 | ||
/*
 * Invalidate all TLB entries for the asid of every context bank used by
 * @cookie, then wait for the invalidations to complete.
 */
static void qcom_iommu_tlb_inv_context(void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
	}

	qcom_iommu_tlb_sync(cookie);
}
146 | ||
/*
 * Invalidate TLB entries covering [iova, iova + size) in granule-sized
 * steps, without waiting for completion (callers follow up with
 * qcom_iommu_tlb_sync()).  @leaf selects last-level-only invalidation.
 *
 * NOTE(review): assumes size is a non-zero multiple of granule, otherwise
 * the do/while below never terminates — the io-pgtable callers guarantee
 * this, but it is worth confirming.
 */
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					    size_t granule, bool leaf, void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i, reg;

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		size_t s = size;

		/* TLBIVA takes the page-aligned VA in the upper bits with
		 * the ASID packed into the low 12 bits:
		 */
		iova = (iova >> 12) << 12;
		iova |= ctx->asid;
		do {
			iommu_writel(ctx, reg, iova);
			iova += granule;
		} while (s -= granule);
	}
}
167 | ||
/* ->tlb_flush_walk: invalidate a range (incl. walk cache) and synchronize. */
static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
	qcom_iommu_tlb_sync(cookie);
}

/* ->tlb_flush_leaf: invalidate last-level entries only and synchronize. */
static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
	qcom_iommu_tlb_sync(cookie);
}

/* ->tlb_add_page: queue one leaf-page invalidation; the sync is deferred
 * to ->iotlb_sync.
 */
static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t granule,
				    void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}

/* TLB maintenance callbacks handed to the io-pgtable code. */
static const struct iommu_flush_ops qcom_flush_ops = {
	.tlb_flush_all	= qcom_iommu_tlb_inv_context,
	.tlb_flush_walk = qcom_iommu_tlb_flush_walk,
	.tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
	.tlb_add_page	= qcom_iommu_tlb_add_page,
};
195 | ||
/*
 * Context-bank fault IRQ handler: log the fault, then clear FSR and
 * terminate the stalled transaction so the bus does not hang.
 */
static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
	struct qcom_iommu_ctx *ctx = dev;
	u32 fsr, fsynr;
	u64 iova;

	fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

	/* IRQ line is shared; bail out quietly if the fault isn't ours */
	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

	/* NOTE(review): this logs when report_iommu_fault() returns 0 —
	 * double-check the intended polarity against the core's return
	 * convention (0 = handled by a registered client handler).
	 */
	if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
		dev_err_ratelimited(ctx->dev,
				    "Unhandled context fault: fsr=0x%x, "
				    "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
				    fsr, iova, fsynr, ctx->asid);
	}

	/* write-1-to-clear the fault status, then terminate the access */
	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
	iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);

	return IRQ_HANDLED;
}
222 | ||
223 | static int qcom_iommu_init_domain(struct iommu_domain *domain, | |
224 | struct qcom_iommu_dev *qcom_iommu, | |
225 | struct iommu_fwspec *fwspec) | |
226 | { | |
227 | struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); | |
228 | struct io_pgtable_ops *pgtbl_ops; | |
229 | struct io_pgtable_cfg pgtbl_cfg; | |
230 | int i, ret = 0; | |
231 | u32 reg; | |
232 | ||
233 | mutex_lock(&qcom_domain->init_mutex); | |
234 | if (qcom_domain->iommu) | |
235 | goto out_unlock; | |
236 | ||
237 | pgtbl_cfg = (struct io_pgtable_cfg) { | |
238 | .pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap, | |
239 | .ias = 32, | |
240 | .oas = 40, | |
298f7889 | 241 | .tlb = &qcom_flush_ops, |
0ae349a0 RC |
242 | .iommu_dev = qcom_iommu->dev, |
243 | }; | |
244 | ||
245 | qcom_domain->iommu = qcom_iommu; | |
246 | pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, fwspec); | |
247 | if (!pgtbl_ops) { | |
248 | dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n"); | |
249 | ret = -ENOMEM; | |
250 | goto out_clear_iommu; | |
251 | } | |
252 | ||
253 | /* Update the domain's page sizes to reflect the page table format */ | |
254 | domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; | |
255 | domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1; | |
256 | domain->geometry.force_aperture = true; | |
257 | ||
258 | for (i = 0; i < fwspec->num_ids; i++) { | |
259 | struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); | |
260 | ||
261 | if (!ctx->secure_init) { | |
262 | ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid); | |
263 | if (ret) { | |
264 | dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret); | |
265 | goto out_clear_iommu; | |
266 | } | |
267 | ctx->secure_init = true; | |
268 | } | |
269 | ||
270 | /* TTBRs */ | |
271 | iommu_writeq(ctx, ARM_SMMU_CB_TTBR0, | |
d1e5f26f | 272 | pgtbl_cfg.arm_lpae_s1_cfg.ttbr | |
fba6e960 | 273 | FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid)); |
fb485eb1 | 274 | iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0); |
0ae349a0 | 275 | |
620565a7 RM |
276 | /* TCR */ |
277 | iommu_writel(ctx, ARM_SMMU_CB_TCR2, | |
fb485eb1 | 278 | arm_smmu_lpae_tcr2(&pgtbl_cfg)); |
620565a7 | 279 | iommu_writel(ctx, ARM_SMMU_CB_TCR, |
fba6e960 | 280 | arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE); |
0ae349a0 RC |
281 | |
282 | /* MAIRs (stage-1 only) */ | |
283 | iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0, | |
205577ab | 284 | pgtbl_cfg.arm_lpae_s1_cfg.mair); |
0ae349a0 | 285 | iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1, |
205577ab | 286 | pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32); |
0ae349a0 RC |
287 | |
288 | /* SCTLR */ | |
fba6e960 WD |
289 | reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | |
290 | ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE | | |
291 | ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE | | |
292 | ARM_SMMU_SCTLR_CFCFG; | |
0ae349a0 RC |
293 | |
294 | if (IS_ENABLED(CONFIG_BIG_ENDIAN)) | |
fba6e960 | 295 | reg |= ARM_SMMU_SCTLR_E; |
0ae349a0 RC |
296 | |
297 | iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg); | |
049541e1 RC |
298 | |
299 | ctx->domain = domain; | |
0ae349a0 RC |
300 | } |
301 | ||
302 | mutex_unlock(&qcom_domain->init_mutex); | |
303 | ||
304 | /* Publish page table ops for map/unmap */ | |
305 | qcom_domain->pgtbl_ops = pgtbl_ops; | |
306 | ||
307 | return 0; | |
308 | ||
309 | out_clear_iommu: | |
310 | qcom_domain->iommu = NULL; | |
311 | out_unlock: | |
312 | mutex_unlock(&qcom_domain->init_mutex); | |
313 | return ret; | |
314 | } | |
315 | ||
316 | static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type) | |
317 | { | |
318 | struct qcom_iommu_domain *qcom_domain; | |
319 | ||
320 | if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) | |
321 | return NULL; | |
322 | /* | |
323 | * Allocate the domain and initialise some of its data structures. | |
324 | * We can't really do anything meaningful until we've added a | |
325 | * master. | |
326 | */ | |
327 | qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL); | |
328 | if (!qcom_domain) | |
329 | return NULL; | |
330 | ||
331 | if (type == IOMMU_DOMAIN_DMA && | |
332 | iommu_get_dma_cookie(&qcom_domain->domain)) { | |
333 | kfree(qcom_domain); | |
334 | return NULL; | |
335 | } | |
336 | ||
337 | mutex_init(&qcom_domain->init_mutex); | |
338 | spin_lock_init(&qcom_domain->pgtbl_lock); | |
339 | ||
340 | return &qcom_domain->domain; | |
341 | } | |
342 | ||
/*
 * Tear down a domain: release the DMA cookie, free the pagetable (with the
 * IOMMU powered, since freeing triggers TLB maintenance), then the domain.
 */
static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);

	iommu_put_dma_cookie(domain);

	if (qcom_domain->iommu) {
		/*
		 * NOTE: unmap can be called after client device is powered
		 * off, for example, with GPUs or anything involving dma-buf.
		 * So we cannot rely on the device_link. Make sure the IOMMU
		 * is on to avoid unclocked accesses in the TLB inv path:
		 */
		pm_runtime_get_sync(qcom_domain->iommu->dev);
		free_io_pgtable_ops(qcom_domain->pgtbl_ops);
		pm_runtime_put_sync(qcom_domain->iommu->dev);
	}

	kfree(qcom_domain);
}
363 | ||
/*
 * Attach @dev to @domain: finalize the domain against this IOMMU (powering
 * it up around the context-bank register writes) and reject domains that
 * span multiple IOMMU instances.
 */
static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	int ret;

	if (!qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalized */
	pm_runtime_get_sync(qcom_iommu->dev);
	ret = qcom_iommu_init_domain(domain, qcom_iommu, fwspec);
	pm_runtime_put_sync(qcom_iommu->dev);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different IOMMUs.
	 */
	if (qcom_domain->iommu != qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU %s while already "
			"attached to domain on IOMMU %s\n",
			dev_name(qcom_domain->iommu->dev),
			dev_name(qcom_iommu->dev));
		return -EINVAL;
	}

	return 0;
}
397 | ||
/*
 * Detach @dev from @domain: disable (SCTLR = 0) every context bank the
 * device uses so no further translations occur, keeping the IOMMU powered
 * for the MMIO writes.
 */
static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	unsigned i;

	/* detach before the domain was ever finalized is a driver bug */
	if (WARN_ON(!qcom_domain->iommu))
		return;

	pm_runtime_get_sync(qcom_iommu->dev);
	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);

		/* Disable the context bank: */
		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

		ctx->domain = NULL;
	}
	pm_runtime_put_sync(qcom_iommu->dev);
}
419 | ||
420 | static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, | |
781ca2de | 421 | phys_addr_t paddr, size_t size, int prot, gfp_t gfp) |
0ae349a0 RC |
422 | { |
423 | int ret; | |
424 | unsigned long flags; | |
425 | struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); | |
426 | struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops; | |
427 | ||
428 | if (!ops) | |
429 | return -ENODEV; | |
430 | ||
431 | spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); | |
432 | ret = ops->map(ops, iova, paddr, size, prot); | |
433 | spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); | |
434 | return ret; | |
435 | } | |
436 | ||
/*
 * Unmap [iova, iova + size); returns the number of bytes actually unmapped
 * (0 if the domain was never finalized).
 */
static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	size_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	/* NOTE: unmap can be called after client device is powered off,
	 * for example, with GPUs or anything involving dma-buf.  So we
	 * cannot rely on the device_link.  Make sure the IOMMU is on to
	 * avoid unclocked accesses in the TLB inv path:
	 */
	pm_runtime_get_sync(qcom_domain->iommu->dev);
	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size, gather);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	pm_runtime_put_sync(qcom_domain->iommu->dev);

	return ret;
}
461 | ||
56f8af5e | 462 | static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain) |
4d689b61 RM |
463 | { |
464 | struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); | |
465 | struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops, | |
466 | struct io_pgtable, ops); | |
467 | if (!qcom_domain->pgtbl_ops) | |
468 | return; | |
469 | ||
470 | pm_runtime_get_sync(qcom_domain->iommu->dev); | |
471 | qcom_iommu_tlb_sync(pgtable->cookie); | |
472 | pm_runtime_put_sync(qcom_domain->iommu->dev); | |
473 | } | |
474 | ||
56f8af5e WD |
475 | static void qcom_iommu_iotlb_sync(struct iommu_domain *domain, |
476 | struct iommu_iotlb_gather *gather) | |
477 | { | |
478 | qcom_iommu_flush_iotlb_all(domain); | |
479 | } | |
480 | ||
0ae349a0 RC |
481 | static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain, |
482 | dma_addr_t iova) | |
483 | { | |
484 | phys_addr_t ret; | |
485 | unsigned long flags; | |
486 | struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); | |
487 | struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops; | |
488 | ||
489 | if (!ops) | |
490 | return 0; | |
491 | ||
492 | spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); | |
493 | ret = ops->iova_to_phys(ops, iova); | |
494 | spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); | |
495 | ||
496 | return ret; | |
497 | } | |
498 | ||
499 | static bool qcom_iommu_capable(enum iommu_cap cap) | |
500 | { | |
501 | switch (cap) { | |
502 | case IOMMU_CAP_CACHE_COHERENCY: | |
503 | /* | |
504 | * Return true here as the SMMU can always send out coherent | |
505 | * requests. | |
506 | */ | |
507 | return true; | |
508 | case IOMMU_CAP_NOEXEC: | |
509 | return true; | |
510 | default: | |
511 | return false; | |
512 | } | |
513 | } | |
514 | ||
/*
 * ->add_device: couple the master to its IOMMU for runtime PM and join it
 * to its IOMMU group.
 */
static int qcom_iommu_add_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));
	struct iommu_group *group;
	struct device_link *link;

	if (!qcom_iommu)
		return -ENODEV;

	/*
	 * Establish the link between iommu and master, so that the
	 * iommu gets runtime enabled/disabled as per the master's
	 * needs.
	 */
	link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
			dev_name(qcom_iommu->dev), dev_name(dev));
		return -ENODEV;
	}

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* group_get_for_dev() took a reference we don't need to keep */
	iommu_group_put(group);
	iommu_device_link(&qcom_iommu->iommu, dev);

	return 0;
}
545 | ||
/* ->remove_device: undo the bookkeeping done in qcom_iommu_add_device(). */
static void qcom_iommu_remove_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));

	if (!qcom_iommu)
		return;

	iommu_device_unlink(&qcom_iommu->iommu, dev);
	iommu_group_remove_device(dev);
	iommu_fwspec_free(dev);
}
557 | ||
558 | static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) | |
559 | { | |
2000e5f7 | 560 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
0ae349a0 RC |
561 | struct qcom_iommu_dev *qcom_iommu; |
562 | struct platform_device *iommu_pdev; | |
563 | unsigned asid = args->args[0]; | |
564 | ||
565 | if (args->args_count != 1) { | |
566 | dev_err(dev, "incorrect number of iommu params found for %s " | |
567 | "(found %d, expected 1)\n", | |
568 | args->np->full_name, args->args_count); | |
569 | return -EINVAL; | |
570 | } | |
571 | ||
572 | iommu_pdev = of_find_device_by_node(args->np); | |
573 | if (WARN_ON(!iommu_pdev)) | |
574 | return -EINVAL; | |
575 | ||
576 | qcom_iommu = platform_get_drvdata(iommu_pdev); | |
577 | ||
578 | /* make sure the asid specified in dt is valid, so we don't have | |
579 | * to sanity check this elsewhere, since 'asid - 1' is used to | |
580 | * index into qcom_iommu->ctxs: | |
581 | */ | |
582 | if (WARN_ON(asid < 1) || | |
583 | WARN_ON(asid > qcom_iommu->num_ctxs)) | |
584 | return -EINVAL; | |
585 | ||
2000e5f7 JR |
586 | if (!fwspec->iommu_priv) { |
587 | fwspec->iommu_priv = qcom_iommu; | |
0ae349a0 RC |
588 | } else { |
589 | /* make sure devices iommus dt node isn't referring to | |
590 | * multiple different iommu devices. Multiple context | |
591 | * banks are ok, but multiple devices are not: | |
592 | */ | |
2000e5f7 | 593 | if (WARN_ON(qcom_iommu != fwspec->iommu_priv)) |
0ae349a0 RC |
594 | return -EINVAL; |
595 | } | |
596 | ||
597 | return iommu_fwspec_add_ids(dev, &asid, 1); | |
598 | } | |
599 | ||
/* iommu_ops vtable registered with the IOMMU core and the platform bus. */
static const struct iommu_ops qcom_iommu_ops = {
	.capable	= qcom_iommu_capable,
	.domain_alloc	= qcom_iommu_domain_alloc,
	.domain_free	= qcom_iommu_domain_free,
	.attach_dev	= qcom_iommu_attach_dev,
	.detach_dev	= qcom_iommu_detach_dev,
	.map		= qcom_iommu_map,
	.unmap		= qcom_iommu_unmap,
	.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
	.iotlb_sync	= qcom_iommu_iotlb_sync,
	.iova_to_phys	= qcom_iommu_iova_to_phys,
	.add_device	= qcom_iommu_add_device,
	.remove_device	= qcom_iommu_remove_device,
	.device_group	= generic_device_group,
	.of_xlate	= qcom_iommu_of_xlate,
	/* initial page sizes; refined per-domain by the pagetable format */
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};
617 | ||
/*
 * Enable the interface clock, then the bus clock.  On failure both clocks
 * are left disabled.
 */
static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	int ret;

	ret = clk_prepare_enable(qcom_iommu->iface_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n");
		return ret;
	}

	ret = clk_prepare_enable(qcom_iommu->bus_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n");
		clk_disable_unprepare(qcom_iommu->iface_clk);
		return ret;
	}

	return 0;
}

/* Disable clocks in the reverse order of qcom_iommu_enable_clocks(). */
static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	clk_disable_unprepare(qcom_iommu->bus_clk);
	clk_disable_unprepare(qcom_iommu->iface_clk);
}
643 | ||
d051f28c SV |
/*
 * One-time allocation of the secure pagetable pool handed to the secure
 * world via SCM.  Once qcom_scm_iommu_secure_ptbl_init() has accepted the
 * buffer it is kept for the lifetime of the system (the function-local
 * 'allocated' latch makes subsequent calls no-ops).
 */
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
	size_t psize = 0;
	unsigned int spare = 0;
	void *cpu_addr;
	dma_addr_t paddr;
	unsigned long attrs;
	static bool allocated = false;
	int ret;

	if (allocated)
		return 0;

	/* ask the firmware how much pagetable memory it wants */
	ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
	if (ret) {
		dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
			ret);
		return ret;
	}

	dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);

	/* the kernel never touches this memory, so skip the mapping */
	attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
	if (!cpu_addr) {
		dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
			psize);
		return -ENOMEM;
	}

	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
	if (ret) {
		dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
		goto free_mem;
	}

	allocated = true;
	return 0;

free_mem:
	dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
	return ret;
}
688 | ||
0ae349a0 RC |
/*
 * Derive a context bank's asid from its position in the register space.
 * Returns the asid (>= 0) or -ENODEV if the node has no "reg" property.
 */
static int get_asid(const struct device_node *np)
{
	u32 reg;

	/* read the "reg" property directly to get the relative address
	 * of the context bank, and calculate the asid from that:
	 */
	if (of_property_read_u32_index(np, "reg", 0, &reg))
		return -ENODEV;

	return reg / 0x1000;	/* context banks are 0x1000 apart */
}
701 | ||
702 | static int qcom_iommu_ctx_probe(struct platform_device *pdev) | |
703 | { | |
704 | struct qcom_iommu_ctx *ctx; | |
705 | struct device *dev = &pdev->dev; | |
706 | struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent); | |
707 | struct resource *res; | |
708 | int ret, irq; | |
709 | ||
710 | ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); | |
711 | if (!ctx) | |
712 | return -ENOMEM; | |
713 | ||
714 | ctx->dev = dev; | |
715 | platform_set_drvdata(pdev, ctx); | |
716 | ||
717 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
718 | ctx->base = devm_ioremap_resource(dev, res); | |
719 | if (IS_ERR(ctx->base)) | |
720 | return PTR_ERR(ctx->base); | |
721 | ||
722 | irq = platform_get_irq(pdev, 0); | |
086f9efa | 723 | if (irq < 0) |
0ae349a0 | 724 | return -ENODEV; |
0ae349a0 RC |
725 | |
726 | /* clear IRQs before registering fault handler, just in case the | |
727 | * boot-loader left us a surprise: | |
728 | */ | |
729 | iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR)); | |
730 | ||
731 | ret = devm_request_irq(dev, irq, | |
732 | qcom_iommu_fault, | |
733 | IRQF_SHARED, | |
734 | "qcom-iommu-fault", | |
735 | ctx); | |
736 | if (ret) { | |
737 | dev_err(dev, "failed to request IRQ %u\n", irq); | |
738 | return ret; | |
739 | } | |
740 | ||
741 | ret = get_asid(dev->of_node); | |
742 | if (ret < 0) { | |
743 | dev_err(dev, "missing reg property\n"); | |
744 | return ret; | |
745 | } | |
746 | ||
747 | ctx->asid = ret; | |
748 | ||
749 | dev_dbg(dev, "found asid %u\n", ctx->asid); | |
750 | ||
751 | qcom_iommu->ctxs[ctx->asid - 1] = ctx; | |
752 | ||
753 | return 0; | |
754 | } | |
755 | ||
/* Remove a context bank: unhook it from the parent's ctxs[] table. */
static int qcom_iommu_ctx_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
	struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	qcom_iommu->ctxs[ctx->asid - 1] = NULL;

	return 0;
}
767 | ||
/* Context banks come in non-secure and secure flavors. */
static const struct of_device_id ctx_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1-ns" },
	{ .compatible = "qcom,msm-iommu-v1-sec" },
	{ /* sentinel */ }
};

/* Driver for the context-bank children populated by the parent device. */
static struct platform_driver qcom_iommu_ctx_driver = {
	.driver	= {
		.name		= "qcom-iommu-ctx",
		.of_match_table	= of_match_ptr(ctx_of_match),
	},
	.probe	= qcom_iommu_ctx_probe,
	.remove	= qcom_iommu_ctx_remove,
};
782 | ||
d051f28c SV |
783 | static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu) |
784 | { | |
785 | struct device_node *child; | |
786 | ||
787 | for_each_child_of_node(qcom_iommu->dev->of_node, child) | |
788 | if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) | |
789 | return true; | |
790 | ||
791 | return false; | |
792 | } | |
793 | ||
0ae349a0 RC |
794 | static int qcom_iommu_device_probe(struct platform_device *pdev) |
795 | { | |
796 | struct device_node *child; | |
797 | struct qcom_iommu_dev *qcom_iommu; | |
798 | struct device *dev = &pdev->dev; | |
799 | struct resource *res; | |
87585537 | 800 | int ret, max_asid = 0; |
0ae349a0 RC |
801 | |
802 | /* find the max asid (which is 1:1 to ctx bank idx), so we know how | |
803 | * many child ctx devices we have: | |
804 | */ | |
805 | for_each_child_of_node(dev->of_node, child) | |
806 | max_asid = max(max_asid, get_asid(child)); | |
807 | ||
87585537 GS |
808 | qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid), |
809 | GFP_KERNEL); | |
0ae349a0 RC |
810 | if (!qcom_iommu) |
811 | return -ENOMEM; | |
812 | qcom_iommu->num_ctxs = max_asid; | |
813 | qcom_iommu->dev = dev; | |
814 | ||
815 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
816 | if (res) | |
817 | qcom_iommu->local_base = devm_ioremap_resource(dev, res); | |
818 | ||
819 | qcom_iommu->iface_clk = devm_clk_get(dev, "iface"); | |
820 | if (IS_ERR(qcom_iommu->iface_clk)) { | |
821 | dev_err(dev, "failed to get iface clock\n"); | |
822 | return PTR_ERR(qcom_iommu->iface_clk); | |
823 | } | |
824 | ||
825 | qcom_iommu->bus_clk = devm_clk_get(dev, "bus"); | |
826 | if (IS_ERR(qcom_iommu->bus_clk)) { | |
827 | dev_err(dev, "failed to get bus clock\n"); | |
828 | return PTR_ERR(qcom_iommu->bus_clk); | |
829 | } | |
830 | ||
831 | if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id", | |
832 | &qcom_iommu->sec_id)) { | |
833 | dev_err(dev, "missing qcom,iommu-secure-id property\n"); | |
834 | return -ENODEV; | |
835 | } | |
836 | ||
d051f28c SV |
837 | if (qcom_iommu_has_secure_context(qcom_iommu)) { |
838 | ret = qcom_iommu_sec_ptbl_init(dev); | |
839 | if (ret) { | |
840 | dev_err(dev, "cannot init secure pg table(%d)\n", ret); | |
841 | return ret; | |
842 | } | |
843 | } | |
844 | ||
0ae349a0 RC |
845 | platform_set_drvdata(pdev, qcom_iommu); |
846 | ||
847 | pm_runtime_enable(dev); | |
848 | ||
849 | /* register context bank devices, which are child nodes: */ | |
850 | ret = devm_of_platform_populate(dev); | |
851 | if (ret) { | |
852 | dev_err(dev, "Failed to populate iommu contexts\n"); | |
853 | return ret; | |
854 | } | |
855 | ||
856 | ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL, | |
857 | dev_name(dev)); | |
858 | if (ret) { | |
859 | dev_err(dev, "Failed to register iommu in sysfs\n"); | |
860 | return ret; | |
861 | } | |
862 | ||
863 | iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops); | |
864 | iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode); | |
865 | ||
866 | ret = iommu_device_register(&qcom_iommu->iommu); | |
867 | if (ret) { | |
868 | dev_err(dev, "Failed to register iommu\n"); | |
869 | return ret; | |
870 | } | |
871 | ||
872 | bus_set_iommu(&platform_bus_type, &qcom_iommu_ops); | |
873 | ||
874 | if (qcom_iommu->local_base) { | |
875 | pm_runtime_get_sync(dev); | |
876 | writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS); | |
877 | pm_runtime_put_sync(dev); | |
878 | } | |
879 | ||
880 | return 0; | |
881 | } | |
882 | ||
/* Remove the top-level device: detach from the bus and the IOMMU core. */
static int qcom_iommu_device_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

	bus_set_iommu(&platform_bus_type, NULL);

	pm_runtime_force_suspend(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	iommu_device_sysfs_remove(&qcom_iommu->iommu);
	iommu_device_unregister(&qcom_iommu->iommu);

	return 0;
}
896 | ||
/* Runtime-PM resume: the IOMMU just needs its clocks back on. */
static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	return qcom_iommu_enable_clocks(qcom_iommu);
}

/* Runtime-PM suspend: gate the clocks. */
static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	qcom_iommu_disable_clocks(qcom_iommu);

	return 0;
}

/* System sleep reuses the runtime-PM hooks via force_suspend/resume. */
static const struct dev_pm_ops qcom_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id qcom_iommu_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1" },
	{ /* sentinel */ }
};

/* Driver for the top-level qcom,msm-iommu-v1 device. */
static struct platform_driver qcom_iommu_driver = {
	.driver	= {
		.name		= "qcom-iommu",
		.of_match_table	= of_match_ptr(qcom_iommu_of_match),
		.pm		= &qcom_iommu_pm_ops,
	},
	.probe	= qcom_iommu_device_probe,
	.remove	= qcom_iommu_device_remove,
};
933 | ||
934 | static int __init qcom_iommu_init(void) | |
935 | { | |
936 | int ret; | |
937 | ||
938 | ret = platform_driver_register(&qcom_iommu_ctx_driver); | |
939 | if (ret) | |
940 | return ret; | |
941 | ||
942 | ret = platform_driver_register(&qcom_iommu_driver); | |
943 | if (ret) | |
944 | platform_driver_unregister(&qcom_iommu_ctx_driver); | |
945 | ||
946 | return ret; | |
947 | } | |
f295cf26 | 948 | device_initcall(qcom_iommu_init); |