Commit | Line | Data |
---|---|---|
08dbd0f8 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
41f3f513 | 2 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. |
a007dd51 PG |
3 | * |
4 | * Author: Stepan Moskovchenko <stepanm@codeaurora.org> | |
0720d1f0 SM |
5 | */ |
6 | ||
7 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
8 | #include <linux/kernel.h> | |
a007dd51 | 9 | #include <linux/init.h> |
0720d1f0 SM |
10 | #include <linux/platform_device.h> |
11 | #include <linux/errno.h> | |
12 | #include <linux/io.h> | |
b77cf11f | 13 | #include <linux/io-pgtable.h> |
0720d1f0 SM |
14 | #include <linux/interrupt.h> |
15 | #include <linux/list.h> | |
16 | #include <linux/spinlock.h> | |
17 | #include <linux/slab.h> | |
18 | #include <linux/iommu.h> | |
41f3f513 | 19 | #include <linux/clk.h> |
f7f125ef | 20 | #include <linux/err.h> |
f78ebca8 | 21 | #include <linux/of_iommu.h> |
0720d1f0 SM |
22 | |
23 | #include <asm/cacheflush.h> | |
87dfb311 | 24 | #include <linux/sizes.h> |
0720d1f0 | 25 | |
0b559df5 SB |
26 | #include "msm_iommu_hw-8xxx.h" |
27 | #include "msm_iommu.h" | |
0720d1f0 | 28 | |
/*
 * MRC() - read a CP15-style coprocessor register into @reg.
 * Expands to an inline "mrc" instruction built from the stringified
 * coprocessor/opcode/register-name arguments.
 */
#define MRC(reg, processor, op1, crn, crm, op2)                        \
__asm__ __volatile__ (                                                 \
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"        \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

/* Serializes access to qcom_iommu_devices and the IOMMU hardware state. */
DEFINE_SPINLOCK(msm_iommu_lock);
/* Every IOMMU instance registered by msm_iommu_probe(). */
static LIST_HEAD(qcom_iommu_devices);
/* Forward declaration: pgsize_bitmap is rewritten in msm_iommu_domain_config(). */
static struct iommu_ops msm_iommu_ops;
/*
 * struct msm_priv - per-domain driver state, embedding the generic
 * iommu_domain so the core's pointer can be converted back with
 * to_msm_priv().
 */
struct msm_priv {
        struct list_head list_attached; /* msm_iommu_dev.dom_node entries */
        struct iommu_domain domain;     /* generic domain handle */
        struct io_pgtable_cfg cfg;      /* io-pgtable configuration */
        struct io_pgtable_ops *iop;     /* ARMv7S page-table ops */
        struct device *dev;             /* device that attached last */
        spinlock_t pgtlock; /* pagetable lock */
};
49 | ||
/* Convert a generic iommu_domain back to the embedding msm_priv. */
static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
        return container_of(dom, struct msm_priv, domain);
}
54 | ||
109bd48e | 55 | static int __enable_clocks(struct msm_iommu_dev *iommu) |
41f3f513 SM |
56 | { |
57 | int ret; | |
58 | ||
109bd48e | 59 | ret = clk_enable(iommu->pclk); |
41f3f513 SM |
60 | if (ret) |
61 | goto fail; | |
62 | ||
109bd48e S |
63 | if (iommu->clk) { |
64 | ret = clk_enable(iommu->clk); | |
41f3f513 | 65 | if (ret) |
109bd48e | 66 | clk_disable(iommu->pclk); |
41f3f513 SM |
67 | } |
68 | fail: | |
69 | return ret; | |
70 | } | |
71 | ||
/* Disable the clocks enabled by __enable_clocks(), core clock first. */
static void __disable_clocks(struct msm_iommu_dev *iommu)
{
        if (iommu->clk)
                clk_disable(iommu->clk);
        clk_disable(iommu->pclk);
}
78 | ||
/*
 * msm_iommu_reset() - put the IOMMU global registers and all @ncb
 * context banks into a known-disabled default state.
 * @base: virtual base of the IOMMU register block
 * @ncb:  number of context banks
 *
 * Caller must have the IOMMU clocks enabled.
 */
static void msm_iommu_reset(void __iomem *base, int ncb)
{
        int ctx;

        /* Global state: disable the RPU, test bus and translation. */
        SET_RPUE(base, 0);
        SET_RPUEIE(base, 0);
        SET_ESRRESTORE(base, 0);
        SET_TBE(base, 0);
        SET_CR(base, 0);
        SET_SPDMBE(base, 0);
        SET_TESTBUSCR(base, 0);
        SET_TLBRSW(base, 0);
        SET_GLOBAL_TLBIALL(base, 0);
        SET_RPU_ACR(base, 0);
        SET_TLBLKCRWE(base, 1);

        /* Clear every context bank's control and translation registers. */
        for (ctx = 0; ctx < ncb; ctx++) {
                SET_BPRCOSH(base, ctx, 0);
                SET_BPRCISH(base, ctx, 0);
                SET_BPRCNSH(base, ctx, 0);
                SET_BPSHCFG(base, ctx, 0);
                SET_BPMTCFG(base, ctx, 0);
                SET_ACTLR(base, ctx, 0);
                SET_SCTLR(base, ctx, 0);
                SET_FSRRESTORE(base, ctx, 0);
                SET_TTBR0(base, ctx, 0);
                SET_TTBR1(base, ctx, 0);
                SET_TTBCR(base, ctx, 0);
                SET_BFBCR(base, ctx, 0);
                SET_PAR(base, ctx, 0);
                SET_FAR(base, ctx, 0);
                SET_CTX_TLBIALL(base, ctx, 0);
                SET_TLBFLPTER(base, ctx, 0);
                SET_TLBSLPTER(base, ctx, 0);
                SET_TLBLKCR(base, ctx, 0);
                SET_CONTEXTIDR(base, ctx, 0);
        }
}
117 | ||
c9220fbd | 118 | static void __flush_iotlb(void *cookie) |
0720d1f0 | 119 | { |
c9220fbd | 120 | struct msm_priv *priv = cookie; |
109bd48e S |
121 | struct msm_iommu_dev *iommu = NULL; |
122 | struct msm_iommu_ctx_dev *master; | |
33069739 | 123 | int ret = 0; |
109bd48e | 124 | |
c9220fbd S |
125 | list_for_each_entry(iommu, &priv->list_attached, dom_node) { |
126 | ret = __enable_clocks(iommu); | |
127 | if (ret) | |
128 | goto fail; | |
0720d1f0 | 129 | |
c9220fbd S |
130 | list_for_each_entry(master, &iommu->ctx_list, list) |
131 | SET_CTX_TLBIALL(iommu->base, master->num, 0); | |
0720d1f0 | 132 | |
c9220fbd | 133 | __disable_clocks(iommu); |
f6f41eb9 | 134 | } |
c9220fbd S |
135 | fail: |
136 | return; | |
137 | } | |
138 | ||
/*
 * io-pgtable tlb_add_flush callback: invalidate the TLB entries covering
 * [iova, iova + size) in steps of @granule, tagged with each context's
 * ASID, on every attached IOMMU.
 *
 * NOTE(review): temp_size is an int holding a size_t, and the do/while
 * assumes size is a non-zero multiple of granule — confirm the io-pgtable
 * layer guarantees both.
 */
static void __flush_iotlb_range(unsigned long iova, size_t size,
                                size_t granule, bool leaf, void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;
        int temp_size;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        temp_size = size;
                        do {
                                /* Tag the VA with the context's ASID. */
                                iova &= TLBIVA_VA;
                                iova |= GET_CONTEXTIDR_ASID(iommu->base,
                                                            master->num);
                                SET_TLBIVA(iommu->base, master->num, iova);
                                iova += granule;
                        } while (temp_size -= granule);
                }

                __disable_clocks(iommu);
        }

fail:
        return;
}
170 | ||
c9220fbd S |
/* io-pgtable tlb_sync callback: intentionally empty — see below. */
static void __flush_iotlb_sync(void *cookie)
{
        /*
         * Nothing is needed here, the barrier to guarantee
         * completion of the tlb sync operation is implicitly
         * taken care when the iommu client does a writel before
         * kick starting the other master.
         */
}
180 | ||
/* TLB maintenance callbacks handed to the io-pgtable allocator. */
static const struct iommu_gather_ops msm_iommu_gather_ops = {
        .tlb_flush_all = __flush_iotlb,
        .tlb_add_flush = __flush_iotlb_range,
        .tlb_sync = __flush_iotlb_sync,
};
186 | ||
109bd48e S |
187 | static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end) |
188 | { | |
189 | int idx; | |
190 | ||
191 | do { | |
192 | idx = find_next_zero_bit(map, end, start); | |
193 | if (idx == end) | |
194 | return -ENOSPC; | |
195 | } while (test_and_set_bit(idx, map)); | |
196 | ||
197 | return idx; | |
198 | } | |
199 | ||
/* Return a context-bank number to the allocation bitmap. */
static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
        clear_bit(idx, map);
}
204 | ||
205 | static void config_mids(struct msm_iommu_dev *iommu, | |
206 | struct msm_iommu_ctx_dev *master) | |
207 | { | |
208 | int mid, ctx, i; | |
209 | ||
210 | for (i = 0; i < master->num_mids; i++) { | |
211 | mid = master->mids[i]; | |
212 | ctx = master->num; | |
213 | ||
214 | SET_M2VCBR_N(iommu->base, mid, 0); | |
215 | SET_CBACR_N(iommu->base, ctx, 0); | |
216 | ||
217 | /* Set VMID = 0 */ | |
218 | SET_VMID(iommu->base, mid, 0); | |
219 | ||
220 | /* Set the context number for that MID to this context */ | |
221 | SET_CBNDX(iommu->base, mid, ctx); | |
222 | ||
223 | /* Set MID associated with this context bank to 0*/ | |
224 | SET_CBVMID(iommu->base, ctx, 0); | |
225 | ||
226 | /* Set the ASID for TLB tagging for this context */ | |
227 | SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx); | |
228 | ||
229 | /* Set security bit override to be Non-secure */ | |
230 | SET_NSCFG(iommu->base, mid, 3); | |
231 | } | |
232 | } | |
233 | ||
0720d1f0 SM |
/*
 * __reset_context() - clear all control/translation registers of one
 * context bank, disabling it.  Caller must hold the clocks enabled.
 */
static void __reset_context(void __iomem *base, int ctx)
{
        SET_BPRCOSH(base, ctx, 0);
        SET_BPRCISH(base, ctx, 0);
        SET_BPRCNSH(base, ctx, 0);
        SET_BPSHCFG(base, ctx, 0);
        SET_BPMTCFG(base, ctx, 0);
        SET_ACTLR(base, ctx, 0);
        SET_SCTLR(base, ctx, 0);
        SET_FSRRESTORE(base, ctx, 0);
        SET_TTBR0(base, ctx, 0);
        SET_TTBR1(base, ctx, 0);
        SET_TTBCR(base, ctx, 0);
        SET_BFBCR(base, ctx, 0);
        SET_PAR(base, ctx, 0);
        SET_FAR(base, ctx, 0);
        SET_CTX_TLBIALL(base, ctx, 0);
        SET_TLBFLPTER(base, ctx, 0);
        SET_TLBSLPTER(base, ctx, 0);
        SET_TLBLKCR(base, ctx, 0);
}
255 | ||
c9220fbd S |
/*
 * __program_context() - configure and enable one context bank for the
 * domain's page tables.  Programs TTBR0/TTBR1/TTBCR and PRRR/NMRR from
 * the domain's ARMv7S io-pgtable config, then enables faults, TLB
 * walking and finally the MMU itself.  Caller must hold the clocks
 * enabled and msm_iommu_lock.
 */
static void __program_context(void __iomem *base, int ctx,
                              struct msm_priv *priv)
{
        __reset_context(base, ctx);

        /* Turn on TEX Remap */
        SET_TRE(base, ctx, 1);
        SET_AFE(base, ctx, 1);

        /* Set up HTW mode */
        /* TLB miss configuration: perform HTW on miss */
        SET_TLBMCFG(base, ctx, 0x3);

        /* V2P configuration: HTW for access */
        SET_V2PCFG(base, ctx, 0x3);

        /* Page-table registers come from the io-pgtable ARMv7S config. */
        SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
        SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
        SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);

        /* Set prrr and nmrr */
        SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
        SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

        /* Invalidate the TLB for this context */
        SET_CTX_TLBIALL(base, ctx, 0);

        /* Set interrupt number to "secure" interrupt */
        SET_IRPTNDX(base, ctx, 0);

        /* Enable context fault interrupt */
        SET_CFEIE(base, ctx, 1);

        /* Stall access on a context fault and let the handler deal with it */
        SET_CFCFG(base, ctx, 1);

        /* Redirect all cacheable requests to L2 slave port. */
        SET_RCISH(base, ctx, 1);
        SET_RCOSH(base, ctx, 1);
        SET_RCNSH(base, ctx, 1);

        /* Turn on BFB prefetch */
        SET_BFBDFE(base, ctx, 1);

        /* Enable the MMU */
        SET_M(base, ctx, 1);
}
303 | ||
3e116c3c | 304 | static struct iommu_domain *msm_iommu_domain_alloc(unsigned type) |
0720d1f0 | 305 | { |
3e116c3c | 306 | struct msm_priv *priv; |
0720d1f0 | 307 | |
3e116c3c JR |
308 | if (type != IOMMU_DOMAIN_UNMANAGED) |
309 | return NULL; | |
310 | ||
311 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | |
0720d1f0 SM |
312 | if (!priv) |
313 | goto fail_nomem; | |
314 | ||
315 | INIT_LIST_HEAD(&priv->list_attached); | |
4be6a290 | 316 | |
3e116c3c JR |
317 | priv->domain.geometry.aperture_start = 0; |
318 | priv->domain.geometry.aperture_end = (1ULL << 32) - 1; | |
319 | priv->domain.geometry.force_aperture = true; | |
4be6a290 | 320 | |
3e116c3c | 321 | return &priv->domain; |
0720d1f0 SM |
322 | |
323 | fail_nomem: | |
324 | kfree(priv); | |
3e116c3c | 325 | return NULL; |
0720d1f0 SM |
326 | } |
327 | ||
/*
 * msm_iommu_domain_free() - release a domain from msm_iommu_domain_alloc().
 *
 * NOTE(review): priv->iop is not freed here; it is released in
 * msm_iommu_detach_dev() — confirm the core always detaches before
 * freeing, otherwise the io-pgtable leaks.
 */
static void msm_iommu_domain_free(struct iommu_domain *domain)
{
        struct msm_priv *priv;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        priv = to_msm_priv(domain);
        kfree(priv);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
0720d1f0 | 338 | |
c9220fbd S |
/*
 * msm_iommu_domain_config() - set up the domain's io-pgtable config and
 * allocate ARMv7 short-descriptor page-table ops.
 * Returns 0 on success, -EINVAL if the io-pgtable allocation fails.
 */
static int msm_iommu_domain_config(struct msm_priv *priv)
{
        spin_lock_init(&priv->pgtlock);

        priv->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
                .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
                .tlb = &msm_iommu_gather_ops,
                .iommu_dev = priv->dev,
        };

        priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
        if (!priv->iop) {
                dev_err(priv->dev, "Failed to allocate pgtable\n");
                return -EINVAL;
        }

        /*
         * NOTE(review): this rewrites the driver-global ops'
         * pgsize_bitmap from a per-domain config — presumably all
         * domains end up with the same bitmap; confirm.
         */
        msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

        return 0;
}
362 | ||
42df43b3 JR |
363 | /* Must be called under msm_iommu_lock */ |
364 | static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev) | |
365 | { | |
366 | struct msm_iommu_dev *iommu, *ret = NULL; | |
367 | struct msm_iommu_ctx_dev *master; | |
368 | ||
369 | list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) { | |
370 | master = list_first_entry(&iommu->ctx_list, | |
371 | struct msm_iommu_ctx_dev, | |
372 | list); | |
373 | if (master->of_node == dev->of_node) { | |
374 | ret = iommu; | |
375 | break; | |
376 | } | |
377 | } | |
378 | ||
379 | return ret; | |
380 | } | |
381 | ||
382 | static int msm_iommu_add_device(struct device *dev) | |
383 | { | |
384 | struct msm_iommu_dev *iommu; | |
ce2eb8f4 | 385 | struct iommu_group *group; |
42df43b3 | 386 | unsigned long flags; |
42df43b3 JR |
387 | |
388 | spin_lock_irqsave(&msm_iommu_lock, flags); | |
42df43b3 | 389 | iommu = find_iommu_for_dev(dev); |
37952146 NC |
390 | spin_unlock_irqrestore(&msm_iommu_lock, flags); |
391 | ||
42df43b3 JR |
392 | if (iommu) |
393 | iommu_device_link(&iommu->iommu, dev); | |
394 | else | |
37952146 | 395 | return -ENODEV; |
ce2eb8f4 RM |
396 | |
397 | group = iommu_group_get_for_dev(dev); | |
398 | if (IS_ERR(group)) | |
399 | return PTR_ERR(group); | |
400 | ||
401 | iommu_group_put(group); | |
402 | ||
403 | return 0; | |
42df43b3 JR |
404 | } |
405 | ||
406 | static void msm_iommu_remove_device(struct device *dev) | |
407 | { | |
408 | struct msm_iommu_dev *iommu; | |
409 | unsigned long flags; | |
410 | ||
411 | spin_lock_irqsave(&msm_iommu_lock, flags); | |
42df43b3 | 412 | iommu = find_iommu_for_dev(dev); |
37952146 NC |
413 | spin_unlock_irqrestore(&msm_iommu_lock, flags); |
414 | ||
42df43b3 JR |
415 | if (iommu) |
416 | iommu_device_unlink(&iommu->iommu, dev); | |
417 | ||
ce2eb8f4 | 418 | iommu_group_remove_device(dev); |
42df43b3 JR |
419 | } |
420 | ||
0720d1f0 SM |
421 | static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) |
422 | { | |
0720d1f0 SM |
423 | int ret = 0; |
424 | unsigned long flags; | |
109bd48e S |
425 | struct msm_iommu_dev *iommu; |
426 | struct msm_priv *priv = to_msm_priv(domain); | |
427 | struct msm_iommu_ctx_dev *master; | |
0720d1f0 | 428 | |
c9220fbd S |
429 | priv->dev = dev; |
430 | msm_iommu_domain_config(priv); | |
431 | ||
0720d1f0 | 432 | spin_lock_irqsave(&msm_iommu_lock, flags); |
109bd48e S |
433 | list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) { |
434 | master = list_first_entry(&iommu->ctx_list, | |
435 | struct msm_iommu_ctx_dev, | |
436 | list); | |
437 | if (master->of_node == dev->of_node) { | |
438 | ret = __enable_clocks(iommu); | |
439 | if (ret) | |
440 | goto fail; | |
441 | ||
442 | list_for_each_entry(master, &iommu->ctx_list, list) { | |
443 | if (master->num) { | |
444 | dev_err(dev, "domain already attached"); | |
445 | ret = -EEXIST; | |
446 | goto fail; | |
447 | } | |
448 | master->num = | |
449 | msm_iommu_alloc_ctx(iommu->context_map, | |
450 | 0, iommu->ncb); | |
ba93c357 JL |
451 | if (IS_ERR_VALUE(master->num)) { |
452 | ret = -ENODEV; | |
453 | goto fail; | |
454 | } | |
109bd48e S |
455 | config_mids(iommu, master); |
456 | __program_context(iommu->base, master->num, | |
c9220fbd | 457 | priv); |
109bd48e S |
458 | } |
459 | __disable_clocks(iommu); | |
460 | list_add(&iommu->dom_node, &priv->list_attached); | |
0720d1f0 | 461 | } |
109bd48e | 462 | } |
0720d1f0 | 463 | |
0720d1f0 SM |
464 | fail: |
465 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | |
109bd48e | 466 | |
0720d1f0 SM |
467 | return ret; |
468 | } | |
469 | ||
/*
 * msm_iommu_detach_dev() - detach @dev's domain: free the io-pgtable,
 * then release and reset every context bank on every attached IOMMU.
 */
static void msm_iommu_detach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        int ret;

        /*
         * NOTE(review): priv->iop is freed but not cleared; a second
         * detach (or a map after detach) would use a stale pointer —
         * confirm the core never calls this twice per attach.
         */
        free_io_pgtable_ops(priv->iop);

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        msm_iommu_free_ctx(iommu->context_map, master->num);
                        __reset_context(iommu->base, master->num);
                }
                __disable_clocks(iommu);
        }
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
496 | ||
c9220fbd | 497 | static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, |
5009065d | 498 | phys_addr_t pa, size_t len, int prot) |
0720d1f0 | 499 | { |
c9220fbd | 500 | struct msm_priv *priv = to_msm_priv(domain); |
0720d1f0 | 501 | unsigned long flags; |
c9220fbd | 502 | int ret; |
0720d1f0 | 503 | |
c9220fbd S |
504 | spin_lock_irqsave(&priv->pgtlock, flags); |
505 | ret = priv->iop->map(priv->iop, iova, pa, len, prot); | |
506 | spin_unlock_irqrestore(&priv->pgtlock, flags); | |
0720d1f0 | 507 | |
0720d1f0 SM |
508 | return ret; |
509 | } | |
510 | ||
c9220fbd S |
511 | static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova, |
512 | size_t len) | |
0720d1f0 | 513 | { |
c9220fbd | 514 | struct msm_priv *priv = to_msm_priv(domain); |
0720d1f0 | 515 | unsigned long flags; |
0720d1f0 | 516 | |
c9220fbd S |
517 | spin_lock_irqsave(&priv->pgtlock, flags); |
518 | len = priv->iop->unmap(priv->iop, iova, len); | |
519 | spin_unlock_irqrestore(&priv->pgtlock, flags); | |
0720d1f0 | 520 | |
5009065d | 521 | return len; |
0720d1f0 SM |
522 | } |
523 | ||
/*
 * msm_iommu_iova_to_phys() - translate @va through the hardware's V2P
 * (virtual-to-physical) probe on the first attached IOMMU's first
 * context bank.  Returns the physical address, or 0 on fault.
 *
 * NOTE(review): `ret` doubles as the __enable_clocks() error code and
 * the translated address — a clock failure is returned to the caller as
 * a small nonzero "physical address"; confirm callers treat nonzero as
 * valid only after a successful walk.
 */
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t va)
{
        struct msm_priv *priv;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        unsigned int par;
        unsigned long flags;
        phys_addr_t ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = to_msm_priv(domain);
        iommu = list_first_entry(&priv->list_attached,
                                 struct msm_iommu_dev, dom_node);

        if (list_empty(&iommu->ctx_list))
                goto fail;

        /* NOTE(review): list_first_entry() never yields NULL, so this
         * check is dead code; the list_empty() above is the real guard. */
        master = list_first_entry(&iommu->ctx_list,
                                  struct msm_iommu_ctx_dev, list);
        if (!master)
                goto fail;

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        /* Invalidate context TLB */
        SET_CTX_TLBIALL(iommu->base, master->num, 0);
        SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

        par = GET_PAR(iommu->base, master->num);

        /* We are dealing with a supersection */
        if (GET_NOFAULT_SS(iommu->base, master->num))
                ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
        else    /* Upper 20 bits from PAR, lower 12 from VA */
                ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

        if (GET_FAULT(iommu->base, master->num))
                ret = 0;

        __disable_clocks(iommu);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}
572 | ||
/* iommu_ops capable callback: this driver advertises no capabilities. */
static bool msm_iommu_capable(enum iommu_cap cap)
{
        return false;
}
577 | ||
/*
 * Dump the fault-related registers of one context bank, decoding the
 * FSR status bits.  Used by the fault handler; clocks must be enabled.
 */
static void print_ctx_regs(void __iomem *base, int ctx)
{
        unsigned int fsr = GET_FSR(base, ctx);
        pr_err("FAR    = %08x    PAR    = %08x\n",
               GET_FAR(base, ctx), GET_PAR(base, ctx));
        pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
               (fsr & 0x02) ? "TF " : "",
               (fsr & 0x04) ? "AFF " : "",
               (fsr & 0x08) ? "APF " : "",
               (fsr & 0x10) ? "TLBMF " : "",
               (fsr & 0x20) ? "HTWDEEF " : "",
               (fsr & 0x40) ? "HTWSEEF " : "",
               (fsr & 0x80) ? "MHF " : "",
               (fsr & 0x10000) ? "SL " : "",
               (fsr & 0x40000000) ? "SS " : "",
               (fsr & 0x80000000) ? "MULTI " : "");

        pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
               GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
        pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
               GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
        pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
               GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}
602 | ||
f78ebca8 S |
603 | static void insert_iommu_master(struct device *dev, |
604 | struct msm_iommu_dev **iommu, | |
605 | struct of_phandle_args *spec) | |
606 | { | |
607 | struct msm_iommu_ctx_dev *master = dev->archdata.iommu; | |
608 | int sid; | |
609 | ||
610 | if (list_empty(&(*iommu)->ctx_list)) { | |
611 | master = kzalloc(sizeof(*master), GFP_ATOMIC); | |
612 | master->of_node = dev->of_node; | |
613 | list_add(&master->list, &(*iommu)->ctx_list); | |
614 | dev->archdata.iommu = master; | |
615 | } | |
616 | ||
617 | for (sid = 0; sid < master->num_mids; sid++) | |
618 | if (master->mids[sid] == spec->args[0]) { | |
619 | dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n", | |
620 | sid); | |
621 | return; | |
622 | } | |
623 | ||
624 | master->mids[master->num_mids++] = spec->args[0]; | |
625 | } | |
626 | ||
627 | static int qcom_iommu_of_xlate(struct device *dev, | |
628 | struct of_phandle_args *spec) | |
629 | { | |
630 | struct msm_iommu_dev *iommu; | |
631 | unsigned long flags; | |
632 | int ret = 0; | |
633 | ||
634 | spin_lock_irqsave(&msm_iommu_lock, flags); | |
635 | list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) | |
636 | if (iommu->dev->of_node == spec->np) | |
637 | break; | |
638 | ||
639 | if (!iommu || iommu->dev->of_node != spec->np) { | |
640 | ret = -ENODEV; | |
641 | goto fail; | |
642 | } | |
643 | ||
644 | insert_iommu_master(dev, &iommu, spec); | |
645 | fail: | |
646 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | |
647 | ||
648 | return ret; | |
649 | } | |
650 | ||
0720d1f0 SM |
/*
 * msm_iommu_fault_handler() - threaded IRQ handler for context faults.
 * Scans every context bank's FSR, dumps the fault registers for any
 * bank that faulted, and clears the fault status.
 *
 * NOTE(review): the (unsigned int) cast of iommu->base truncates the
 * pointer on 64-bit builds — presumably fine on this 32-bit-only SoC,
 * but confirm before reuse.
 */
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
        struct msm_iommu_dev *iommu = dev_id;
        unsigned int fsr;
        int i, ret;

        spin_lock(&msm_iommu_lock);

        if (!iommu) {
                pr_err("Invalid device ID in context interrupt handler\n");
                goto fail;
        }

        pr_err("Unexpected IOMMU page fault!\n");
        pr_err("base = %08x\n", (unsigned int)iommu->base);

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        for (i = 0; i < iommu->ncb; i++) {
                fsr = GET_FSR(iommu->base, i);
                if (fsr) {
                        pr_err("Fault occurred in context %d.\n", i);
                        pr_err("Interesting registers:\n");
                        print_ctx_regs(iommu->base, i);
                        /* Write-back clears the fault status bits. */
                        SET_FSR(iommu->base, i, 0x4000000F);
                }
        }
        __disable_clocks(iommu);
fail:
        spin_unlock(&msm_iommu_lock);
        return 0;
}
685 | ||
/* IOMMU core callbacks; pgsize_bitmap is refined at runtime by
 * msm_iommu_domain_config() from the io-pgtable config. */
static struct iommu_ops msm_iommu_ops = {
        .capable = msm_iommu_capable,
        .domain_alloc = msm_iommu_domain_alloc,
        .domain_free = msm_iommu_domain_free,
        .attach_dev = msm_iommu_attach_dev,
        .detach_dev = msm_iommu_detach_dev,
        .map = msm_iommu_map,
        .unmap = msm_iommu_unmap,
        .iova_to_phys = msm_iommu_iova_to_phys,
        .add_device = msm_iommu_add_device,
        .remove_device = msm_iommu_remove_device,
        .device_group = generic_device_group,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
        .of_xlate = qcom_iommu_of_xlate,
};
701 | ||
f7f125ef S |
702 | static int msm_iommu_probe(struct platform_device *pdev) |
703 | { | |
704 | struct resource *r; | |
42df43b3 | 705 | resource_size_t ioaddr; |
f7f125ef S |
706 | struct msm_iommu_dev *iommu; |
707 | int ret, par, val; | |
708 | ||
709 | iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL); | |
710 | if (!iommu) | |
711 | return -ENODEV; | |
712 | ||
713 | iommu->dev = &pdev->dev; | |
714 | INIT_LIST_HEAD(&iommu->ctx_list); | |
715 | ||
716 | iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk"); | |
717 | if (IS_ERR(iommu->pclk)) { | |
718 | dev_err(iommu->dev, "could not get smmu_pclk\n"); | |
719 | return PTR_ERR(iommu->pclk); | |
720 | } | |
721 | ||
722 | ret = clk_prepare(iommu->pclk); | |
723 | if (ret) { | |
724 | dev_err(iommu->dev, "could not prepare smmu_pclk\n"); | |
725 | return ret; | |
726 | } | |
727 | ||
728 | iommu->clk = devm_clk_get(iommu->dev, "iommu_clk"); | |
729 | if (IS_ERR(iommu->clk)) { | |
730 | dev_err(iommu->dev, "could not get iommu_clk\n"); | |
731 | clk_unprepare(iommu->pclk); | |
732 | return PTR_ERR(iommu->clk); | |
733 | } | |
734 | ||
735 | ret = clk_prepare(iommu->clk); | |
736 | if (ret) { | |
737 | dev_err(iommu->dev, "could not prepare iommu_clk\n"); | |
738 | clk_unprepare(iommu->pclk); | |
739 | return ret; | |
740 | } | |
741 | ||
742 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
743 | iommu->base = devm_ioremap_resource(iommu->dev, r); | |
744 | if (IS_ERR(iommu->base)) { | |
745 | dev_err(iommu->dev, "could not get iommu base\n"); | |
746 | ret = PTR_ERR(iommu->base); | |
747 | goto fail; | |
748 | } | |
42df43b3 | 749 | ioaddr = r->start; |
f7f125ef S |
750 | |
751 | iommu->irq = platform_get_irq(pdev, 0); | |
752 | if (iommu->irq < 0) { | |
753 | dev_err(iommu->dev, "could not get iommu irq\n"); | |
754 | ret = -ENODEV; | |
755 | goto fail; | |
756 | } | |
757 | ||
758 | ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val); | |
759 | if (ret) { | |
760 | dev_err(iommu->dev, "could not get ncb\n"); | |
761 | goto fail; | |
762 | } | |
763 | iommu->ncb = val; | |
764 | ||
765 | msm_iommu_reset(iommu->base, iommu->ncb); | |
766 | SET_M(iommu->base, 0, 1); | |
767 | SET_PAR(iommu->base, 0, 0); | |
768 | SET_V2PCFG(iommu->base, 0, 1); | |
769 | SET_V2PPR(iommu->base, 0, 0); | |
770 | par = GET_PAR(iommu->base, 0); | |
771 | SET_V2PCFG(iommu->base, 0, 0); | |
772 | SET_M(iommu->base, 0, 0); | |
773 | ||
774 | if (!par) { | |
775 | pr_err("Invalid PAR value detected\n"); | |
776 | ret = -ENODEV; | |
777 | goto fail; | |
778 | } | |
779 | ||
780 | ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL, | |
781 | msm_iommu_fault_handler, | |
782 | IRQF_ONESHOT | IRQF_SHARED, | |
783 | "msm_iommu_secure_irpt_handler", | |
784 | iommu); | |
785 | if (ret) { | |
786 | pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret); | |
787 | goto fail; | |
788 | } | |
789 | ||
790 | list_add(&iommu->dev_node, &qcom_iommu_devices); | |
42df43b3 JR |
791 | |
792 | ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL, | |
793 | "msm-smmu.%pa", &ioaddr); | |
794 | if (ret) { | |
795 | pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr); | |
796 | goto fail; | |
797 | } | |
798 | ||
799 | iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops); | |
800 | iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode); | |
801 | ||
802 | ret = iommu_device_register(&iommu->iommu); | |
803 | if (ret) { | |
804 | pr_err("Could not register msm-smmu at %pa\n", &ioaddr); | |
805 | goto fail; | |
806 | } | |
807 | ||
892d7aad RM |
808 | bus_set_iommu(&platform_bus_type, &msm_iommu_ops); |
809 | ||
f7f125ef S |
810 | pr_info("device mapped at %p, irq %d with %d ctx banks\n", |
811 | iommu->base, iommu->irq, iommu->ncb); | |
812 | ||
813 | return ret; | |
814 | fail: | |
815 | clk_unprepare(iommu->clk); | |
816 | clk_unprepare(iommu->pclk); | |
817 | return ret; | |
818 | } | |
819 | ||
/* Device-tree match table: this driver binds to the APQ8064 IOMMU. */
static const struct of_device_id msm_iommu_dt_match[] = {
        { .compatible = "qcom,apq8064-iommu" },
        {}
};
824 | ||
/*
 * msm_iommu_remove() - unprepare the clocks on driver removal.
 * NOTE(review): relies on drvdata having been set during probe —
 * verify platform_set_drvdata() is actually called there, otherwise
 * this dereferences NULL.
 */
static int msm_iommu_remove(struct platform_device *pdev)
{
        struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return 0;
}
833 | ||
/* Platform driver glue; registered from msm_iommu_driver_init(). */
static struct platform_driver msm_iommu_driver = {
        .driver = {
                .name = "msm_iommu",
                .of_match_table = msm_iommu_dt_match,
        },
        .probe = msm_iommu_probe,
        .remove = msm_iommu_remove,
};
842 | ||
843 | static int __init msm_iommu_driver_init(void) | |
844 | { | |
845 | int ret; | |
846 | ||
847 | ret = platform_driver_register(&msm_iommu_driver); | |
848 | if (ret != 0) | |
849 | pr_err("Failed to register IOMMU driver\n"); | |
850 | ||
851 | return ret; | |
852 | } | |
f7f125ef | 853 | subsys_initcall(msm_iommu_driver_init); |
f7f125ef | 854 |