Commit | Line | Data |
---|---|---|
08dbd0f8 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
41f3f513 | 2 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. |
a007dd51 PG |
3 | * |
4 | * Author: Stepan Moskovchenko <stepanm@codeaurora.org> | |
0720d1f0 SM |
5 | */ |
6 | ||
7 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
8 | #include <linux/kernel.h> | |
a007dd51 | 9 | #include <linux/init.h> |
0720d1f0 SM |
10 | #include <linux/platform_device.h> |
11 | #include <linux/errno.h> | |
12 | #include <linux/io.h> | |
b77cf11f | 13 | #include <linux/io-pgtable.h> |
0720d1f0 SM |
14 | #include <linux/interrupt.h> |
15 | #include <linux/list.h> | |
16 | #include <linux/spinlock.h> | |
17 | #include <linux/slab.h> | |
18 | #include <linux/iommu.h> | |
41f3f513 | 19 | #include <linux/clk.h> |
f7f125ef | 20 | #include <linux/err.h> |
f78ebca8 | 21 | #include <linux/of_iommu.h> |
0720d1f0 SM |
22 | |
23 | #include <asm/cacheflush.h> | |
87dfb311 | 24 | #include <linux/sizes.h> |
0720d1f0 | 25 | |
0b559df5 SB |
26 | #include "msm_iommu_hw-8xxx.h" |
27 | #include "msm_iommu.h" | |
0720d1f0 | 28 | |
100832c9 SM |
29 | #define MRC(reg, processor, op1, crn, crm, op2) \ |
30 | __asm__ __volatile__ ( \ | |
31 | " mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \ | |
32 | : "=r" (reg)) | |
33 | ||
83427275 OBC |
34 | /* bitmap of the page sizes currently supported */ |
35 | #define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M) | |
36 | ||
c4e0f3b2 | 37 | static DEFINE_SPINLOCK(msm_iommu_lock); |
109bd48e | 38 | static LIST_HEAD(qcom_iommu_devices); |
c9220fbd | 39 | static struct iommu_ops msm_iommu_ops; |
0720d1f0 SM |
40 | |
41 | struct msm_priv { | |
0720d1f0 | 42 | struct list_head list_attached; |
3e116c3c | 43 | struct iommu_domain domain; |
c9220fbd S |
44 | struct io_pgtable_cfg cfg; |
45 | struct io_pgtable_ops *iop; | |
46 | struct device *dev; | |
47 | spinlock_t pgtlock; /* pagetable lock */ | |
0720d1f0 SM |
48 | }; |
49 | ||
3e116c3c JR |
50 | static struct msm_priv *to_msm_priv(struct iommu_domain *dom) |
51 | { | |
52 | return container_of(dom, struct msm_priv, domain); | |
53 | } | |
54 | ||
109bd48e | 55 | static int __enable_clocks(struct msm_iommu_dev *iommu) |
41f3f513 SM |
56 | { |
57 | int ret; | |
58 | ||
109bd48e | 59 | ret = clk_enable(iommu->pclk); |
41f3f513 SM |
60 | if (ret) |
61 | goto fail; | |
62 | ||
109bd48e S |
63 | if (iommu->clk) { |
64 | ret = clk_enable(iommu->clk); | |
41f3f513 | 65 | if (ret) |
109bd48e | 66 | clk_disable(iommu->pclk); |
41f3f513 SM |
67 | } |
68 | fail: | |
69 | return ret; | |
70 | } | |
71 | ||
109bd48e | 72 | static void __disable_clocks(struct msm_iommu_dev *iommu) |
41f3f513 | 73 | { |
109bd48e S |
74 | if (iommu->clk) |
75 | clk_disable(iommu->clk); | |
76 | clk_disable(iommu->pclk); | |
41f3f513 SM |
77 | } |
78 | ||
f7f125ef S |
79 | static void msm_iommu_reset(void __iomem *base, int ncb) |
80 | { | |
81 | int ctx; | |
82 | ||
83 | SET_RPUE(base, 0); | |
84 | SET_RPUEIE(base, 0); | |
85 | SET_ESRRESTORE(base, 0); | |
86 | SET_TBE(base, 0); | |
87 | SET_CR(base, 0); | |
88 | SET_SPDMBE(base, 0); | |
89 | SET_TESTBUSCR(base, 0); | |
90 | SET_TLBRSW(base, 0); | |
91 | SET_GLOBAL_TLBIALL(base, 0); | |
92 | SET_RPU_ACR(base, 0); | |
93 | SET_TLBLKCRWE(base, 1); | |
94 | ||
95 | for (ctx = 0; ctx < ncb; ctx++) { | |
96 | SET_BPRCOSH(base, ctx, 0); | |
97 | SET_BPRCISH(base, ctx, 0); | |
98 | SET_BPRCNSH(base, ctx, 0); | |
99 | SET_BPSHCFG(base, ctx, 0); | |
100 | SET_BPMTCFG(base, ctx, 0); | |
101 | SET_ACTLR(base, ctx, 0); | |
102 | SET_SCTLR(base, ctx, 0); | |
103 | SET_FSRRESTORE(base, ctx, 0); | |
104 | SET_TTBR0(base, ctx, 0); | |
105 | SET_TTBR1(base, ctx, 0); | |
106 | SET_TTBCR(base, ctx, 0); | |
107 | SET_BFBCR(base, ctx, 0); | |
108 | SET_PAR(base, ctx, 0); | |
109 | SET_FAR(base, ctx, 0); | |
110 | SET_CTX_TLBIALL(base, ctx, 0); | |
111 | SET_TLBFLPTER(base, ctx, 0); | |
112 | SET_TLBSLPTER(base, ctx, 0); | |
113 | SET_TLBLKCR(base, ctx, 0); | |
f7f125ef S |
114 | SET_CONTEXTIDR(base, ctx, 0); |
115 | } | |
116 | } | |
117 | ||
c9220fbd | 118 | static void __flush_iotlb(void *cookie) |
0720d1f0 | 119 | { |
c9220fbd | 120 | struct msm_priv *priv = cookie; |
109bd48e S |
121 | struct msm_iommu_dev *iommu = NULL; |
122 | struct msm_iommu_ctx_dev *master; | |
33069739 | 123 | int ret = 0; |
109bd48e | 124 | |
c9220fbd S |
125 | list_for_each_entry(iommu, &priv->list_attached, dom_node) { |
126 | ret = __enable_clocks(iommu); | |
127 | if (ret) | |
128 | goto fail; | |
0720d1f0 | 129 | |
c9220fbd S |
130 | list_for_each_entry(master, &iommu->ctx_list, list) |
131 | SET_CTX_TLBIALL(iommu->base, master->num, 0); | |
0720d1f0 | 132 | |
c9220fbd | 133 | __disable_clocks(iommu); |
f6f41eb9 | 134 | } |
c9220fbd S |
135 | fail: |
136 | return; | |
137 | } | |
138 | ||
139 | static void __flush_iotlb_range(unsigned long iova, size_t size, | |
140 | size_t granule, bool leaf, void *cookie) | |
141 | { | |
142 | struct msm_priv *priv = cookie; | |
143 | struct msm_iommu_dev *iommu = NULL; | |
144 | struct msm_iommu_ctx_dev *master; | |
145 | int ret = 0; | |
146 | int temp_size; | |
0720d1f0 | 147 | |
109bd48e S |
148 | list_for_each_entry(iommu, &priv->list_attached, dom_node) { |
149 | ret = __enable_clocks(iommu); | |
41f3f513 SM |
150 | if (ret) |
151 | goto fail; | |
152 | ||
c9220fbd S |
153 | list_for_each_entry(master, &iommu->ctx_list, list) { |
154 | temp_size = size; | |
155 | do { | |
156 | iova &= TLBIVA_VA; | |
157 | iova |= GET_CONTEXTIDR_ASID(iommu->base, | |
158 | master->num); | |
159 | SET_TLBIVA(iommu->base, master->num, iova); | |
160 | iova += granule; | |
161 | } while (temp_size -= granule); | |
162 | } | |
109bd48e S |
163 | |
164 | __disable_clocks(iommu); | |
0720d1f0 | 165 | } |
c9220fbd | 166 | |
41f3f513 | 167 | fail: |
c9220fbd | 168 | return; |
0720d1f0 SM |
169 | } |
170 | ||
05aed941 WD |
171 | static void __flush_iotlb_walk(unsigned long iova, size_t size, |
172 | size_t granule, void *cookie) | |
c9220fbd | 173 | { |
05aed941 | 174 | __flush_iotlb_range(iova, size, granule, false, cookie); |
05aed941 WD |
175 | } |
176 | ||
3951c41a WD |
177 | static void __flush_iotlb_page(struct iommu_iotlb_gather *gather, |
178 | unsigned long iova, size_t granule, void *cookie) | |
abfd6fe0 WD |
179 | { |
180 | __flush_iotlb_range(iova, granule, granule, true, cookie); | |
181 | } | |
182 | ||
298f7889 | 183 | static const struct iommu_flush_ops msm_iommu_flush_ops = { |
c9220fbd | 184 | .tlb_flush_all = __flush_iotlb, |
05aed941 | 185 | .tlb_flush_walk = __flush_iotlb_walk, |
abfd6fe0 | 186 | .tlb_add_page = __flush_iotlb_page, |
c9220fbd S |
187 | }; |
188 | ||
109bd48e S |
189 | static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end) |
190 | { | |
191 | int idx; | |
192 | ||
193 | do { | |
194 | idx = find_next_zero_bit(map, end, start); | |
195 | if (idx == end) | |
196 | return -ENOSPC; | |
197 | } while (test_and_set_bit(idx, map)); | |
198 | ||
199 | return idx; | |
200 | } | |
201 | ||
/* Return context bank @idx to the free pool. */
static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
206 | ||
207 | static void config_mids(struct msm_iommu_dev *iommu, | |
208 | struct msm_iommu_ctx_dev *master) | |
209 | { | |
210 | int mid, ctx, i; | |
211 | ||
212 | for (i = 0; i < master->num_mids; i++) { | |
213 | mid = master->mids[i]; | |
214 | ctx = master->num; | |
215 | ||
216 | SET_M2VCBR_N(iommu->base, mid, 0); | |
217 | SET_CBACR_N(iommu->base, ctx, 0); | |
218 | ||
219 | /* Set VMID = 0 */ | |
220 | SET_VMID(iommu->base, mid, 0); | |
221 | ||
222 | /* Set the context number for that MID to this context */ | |
223 | SET_CBNDX(iommu->base, mid, ctx); | |
224 | ||
225 | /* Set MID associated with this context bank to 0*/ | |
226 | SET_CBVMID(iommu->base, ctx, 0); | |
227 | ||
228 | /* Set the ASID for TLB tagging for this context */ | |
229 | SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx); | |
230 | ||
231 | /* Set security bit override to be Non-secure */ | |
232 | SET_NSCFG(iommu->base, mid, 3); | |
233 | } | |
234 | } | |
235 | ||
0720d1f0 SM |
236 | static void __reset_context(void __iomem *base, int ctx) |
237 | { | |
238 | SET_BPRCOSH(base, ctx, 0); | |
239 | SET_BPRCISH(base, ctx, 0); | |
240 | SET_BPRCNSH(base, ctx, 0); | |
241 | SET_BPSHCFG(base, ctx, 0); | |
242 | SET_BPMTCFG(base, ctx, 0); | |
243 | SET_ACTLR(base, ctx, 0); | |
244 | SET_SCTLR(base, ctx, 0); | |
245 | SET_FSRRESTORE(base, ctx, 0); | |
246 | SET_TTBR0(base, ctx, 0); | |
247 | SET_TTBR1(base, ctx, 0); | |
248 | SET_TTBCR(base, ctx, 0); | |
249 | SET_BFBCR(base, ctx, 0); | |
250 | SET_PAR(base, ctx, 0); | |
251 | SET_FAR(base, ctx, 0); | |
252 | SET_CTX_TLBIALL(base, ctx, 0); | |
253 | SET_TLBFLPTER(base, ctx, 0); | |
254 | SET_TLBSLPTER(base, ctx, 0); | |
255 | SET_TLBLKCR(base, ctx, 0); | |
0720d1f0 SM |
256 | } |
257 | ||
c9220fbd S |
258 | static void __program_context(void __iomem *base, int ctx, |
259 | struct msm_priv *priv) | |
0720d1f0 SM |
260 | { |
261 | __reset_context(base, ctx); | |
262 | ||
c9220fbd S |
263 | /* Turn on TEX Remap */ |
264 | SET_TRE(base, ctx, 1); | |
265 | SET_AFE(base, ctx, 1); | |
266 | ||
0720d1f0 SM |
267 | /* Set up HTW mode */ |
268 | /* TLB miss configuration: perform HTW on miss */ | |
269 | SET_TLBMCFG(base, ctx, 0x3); | |
270 | ||
271 | /* V2P configuration: HTW for access */ | |
272 | SET_V2PCFG(base, ctx, 0x3); | |
273 | ||
c9220fbd | 274 | SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr); |
d1e5f26f RM |
275 | SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr); |
276 | SET_TTBR1(base, ctx, 0); | |
c9220fbd S |
277 | |
278 | /* Set prrr and nmrr */ | |
279 | SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr); | |
280 | SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr); | |
0720d1f0 SM |
281 | |
282 | /* Invalidate the TLB for this context */ | |
283 | SET_CTX_TLBIALL(base, ctx, 0); | |
284 | ||
285 | /* Set interrupt number to "secure" interrupt */ | |
286 | SET_IRPTNDX(base, ctx, 0); | |
287 | ||
288 | /* Enable context fault interrupt */ | |
289 | SET_CFEIE(base, ctx, 1); | |
290 | ||
291 | /* Stall access on a context fault and let the handler deal with it */ | |
292 | SET_CFCFG(base, ctx, 1); | |
293 | ||
294 | /* Redirect all cacheable requests to L2 slave port. */ | |
295 | SET_RCISH(base, ctx, 1); | |
296 | SET_RCOSH(base, ctx, 1); | |
297 | SET_RCNSH(base, ctx, 1); | |
298 | ||
0720d1f0 SM |
299 | /* Turn on BFB prefetch */ |
300 | SET_BFBDFE(base, ctx, 1); | |
301 | ||
0720d1f0 SM |
302 | /* Enable the MMU */ |
303 | SET_M(base, ctx, 1); | |
304 | } | |
305 | ||
3e116c3c | 306 | static struct iommu_domain *msm_iommu_domain_alloc(unsigned type) |
0720d1f0 | 307 | { |
3e116c3c | 308 | struct msm_priv *priv; |
0720d1f0 | 309 | |
3e116c3c JR |
310 | if (type != IOMMU_DOMAIN_UNMANAGED) |
311 | return NULL; | |
312 | ||
313 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | |
0720d1f0 SM |
314 | if (!priv) |
315 | goto fail_nomem; | |
316 | ||
317 | INIT_LIST_HEAD(&priv->list_attached); | |
4be6a290 | 318 | |
3e116c3c JR |
319 | priv->domain.geometry.aperture_start = 0; |
320 | priv->domain.geometry.aperture_end = (1ULL << 32) - 1; | |
321 | priv->domain.geometry.force_aperture = true; | |
4be6a290 | 322 | |
3e116c3c | 323 | return &priv->domain; |
0720d1f0 SM |
324 | |
325 | fail_nomem: | |
326 | kfree(priv); | |
3e116c3c | 327 | return NULL; |
0720d1f0 SM |
328 | } |
329 | ||
3e116c3c | 330 | static void msm_iommu_domain_free(struct iommu_domain *domain) |
0720d1f0 SM |
331 | { |
332 | struct msm_priv *priv; | |
333 | unsigned long flags; | |
0720d1f0 SM |
334 | |
335 | spin_lock_irqsave(&msm_iommu_lock, flags); | |
3e116c3c | 336 | priv = to_msm_priv(domain); |
c9220fbd S |
337 | kfree(priv); |
338 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | |
339 | } | |
0720d1f0 | 340 | |
c9220fbd S |
341 | static int msm_iommu_domain_config(struct msm_priv *priv) |
342 | { | |
343 | spin_lock_init(&priv->pgtlock); | |
0720d1f0 | 344 | |
c9220fbd S |
345 | priv->cfg = (struct io_pgtable_cfg) { |
346 | .quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP, | |
347 | .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap, | |
348 | .ias = 32, | |
349 | .oas = 32, | |
298f7889 | 350 | .tlb = &msm_iommu_flush_ops, |
c9220fbd S |
351 | .iommu_dev = priv->dev, |
352 | }; | |
0720d1f0 | 353 | |
c9220fbd S |
354 | priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv); |
355 | if (!priv->iop) { | |
356 | dev_err(priv->dev, "Failed to allocate pgtable\n"); | |
357 | return -EINVAL; | |
358 | } | |
0720d1f0 | 359 | |
c9220fbd S |
360 | msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap; |
361 | ||
362 | return 0; | |
0720d1f0 SM |
363 | } |
364 | ||
42df43b3 JR |
365 | /* Must be called under msm_iommu_lock */ |
366 | static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev) | |
367 | { | |
368 | struct msm_iommu_dev *iommu, *ret = NULL; | |
369 | struct msm_iommu_ctx_dev *master; | |
370 | ||
371 | list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) { | |
372 | master = list_first_entry(&iommu->ctx_list, | |
373 | struct msm_iommu_ctx_dev, | |
374 | list); | |
375 | if (master->of_node == dev->of_node) { | |
376 | ret = iommu; | |
377 | break; | |
378 | } | |
379 | } | |
380 | ||
381 | return ret; | |
382 | } | |
383 | ||
dea74f1c | 384 | static struct iommu_device *msm_iommu_probe_device(struct device *dev) |
42df43b3 JR |
385 | { |
386 | struct msm_iommu_dev *iommu; | |
387 | unsigned long flags; | |
42df43b3 JR |
388 | |
389 | spin_lock_irqsave(&msm_iommu_lock, flags); | |
42df43b3 | 390 | iommu = find_iommu_for_dev(dev); |
37952146 NC |
391 | spin_unlock_irqrestore(&msm_iommu_lock, flags); |
392 | ||
dea74f1c JR |
393 | if (!iommu) |
394 | return ERR_PTR(-ENODEV); | |
ce2eb8f4 | 395 | |
dea74f1c | 396 | return &iommu->iommu; |
42df43b3 JR |
397 | } |
398 | ||
/* iommu_ops->release_device: intentionally a no-op for this driver. */
static void msm_iommu_release_device(struct device *dev)
{
}
402 | ||
0720d1f0 SM |
403 | static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) |
404 | { | |
0720d1f0 SM |
405 | int ret = 0; |
406 | unsigned long flags; | |
109bd48e S |
407 | struct msm_iommu_dev *iommu; |
408 | struct msm_priv *priv = to_msm_priv(domain); | |
409 | struct msm_iommu_ctx_dev *master; | |
0720d1f0 | 410 | |
c9220fbd S |
411 | priv->dev = dev; |
412 | msm_iommu_domain_config(priv); | |
413 | ||
0720d1f0 | 414 | spin_lock_irqsave(&msm_iommu_lock, flags); |
109bd48e S |
415 | list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) { |
416 | master = list_first_entry(&iommu->ctx_list, | |
417 | struct msm_iommu_ctx_dev, | |
418 | list); | |
419 | if (master->of_node == dev->of_node) { | |
420 | ret = __enable_clocks(iommu); | |
421 | if (ret) | |
422 | goto fail; | |
423 | ||
424 | list_for_each_entry(master, &iommu->ctx_list, list) { | |
425 | if (master->num) { | |
426 | dev_err(dev, "domain already attached"); | |
427 | ret = -EEXIST; | |
428 | goto fail; | |
429 | } | |
430 | master->num = | |
431 | msm_iommu_alloc_ctx(iommu->context_map, | |
432 | 0, iommu->ncb); | |
ba93c357 JL |
433 | if (IS_ERR_VALUE(master->num)) { |
434 | ret = -ENODEV; | |
435 | goto fail; | |
436 | } | |
109bd48e S |
437 | config_mids(iommu, master); |
438 | __program_context(iommu->base, master->num, | |
c9220fbd | 439 | priv); |
109bd48e S |
440 | } |
441 | __disable_clocks(iommu); | |
442 | list_add(&iommu->dom_node, &priv->list_attached); | |
0720d1f0 | 443 | } |
109bd48e | 444 | } |
0720d1f0 | 445 | |
0720d1f0 SM |
446 | fail: |
447 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | |
109bd48e | 448 | |
0720d1f0 SM |
449 | return ret; |
450 | } | |
451 | ||
452 | static void msm_iommu_detach_dev(struct iommu_domain *domain, | |
453 | struct device *dev) | |
454 | { | |
109bd48e | 455 | struct msm_priv *priv = to_msm_priv(domain); |
0720d1f0 | 456 | unsigned long flags; |
109bd48e S |
457 | struct msm_iommu_dev *iommu; |
458 | struct msm_iommu_ctx_dev *master; | |
33069739 | 459 | int ret; |
0720d1f0 | 460 | |
c9220fbd | 461 | free_io_pgtable_ops(priv->iop); |
33069739 | 462 | |
c9220fbd | 463 | spin_lock_irqsave(&msm_iommu_lock, flags); |
109bd48e S |
464 | list_for_each_entry(iommu, &priv->list_attached, dom_node) { |
465 | ret = __enable_clocks(iommu); | |
466 | if (ret) | |
467 | goto fail; | |
0720d1f0 | 468 | |
109bd48e S |
469 | list_for_each_entry(master, &iommu->ctx_list, list) { |
470 | msm_iommu_free_ctx(iommu->context_map, master->num); | |
471 | __reset_context(iommu->base, master->num); | |
472 | } | |
473 | __disable_clocks(iommu); | |
474 | } | |
0720d1f0 SM |
475 | fail: |
476 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | |
477 | } | |
478 | ||
c9220fbd | 479 | static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, |
781ca2de | 480 | phys_addr_t pa, size_t len, int prot, gfp_t gfp) |
0720d1f0 | 481 | { |
c9220fbd | 482 | struct msm_priv *priv = to_msm_priv(domain); |
0720d1f0 | 483 | unsigned long flags; |
c9220fbd | 484 | int ret; |
0720d1f0 | 485 | |
c9220fbd | 486 | spin_lock_irqsave(&priv->pgtlock, flags); |
f34ce7a7 | 487 | ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC); |
c9220fbd | 488 | spin_unlock_irqrestore(&priv->pgtlock, flags); |
0720d1f0 | 489 | |
0720d1f0 SM |
490 | return ret; |
491 | } | |
492 | ||
c9220fbd | 493 | static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova, |
56f8af5e | 494 | size_t len, struct iommu_iotlb_gather *gather) |
0720d1f0 | 495 | { |
c9220fbd | 496 | struct msm_priv *priv = to_msm_priv(domain); |
0720d1f0 | 497 | unsigned long flags; |
0720d1f0 | 498 | |
c9220fbd | 499 | spin_lock_irqsave(&priv->pgtlock, flags); |
a2d3a382 | 500 | len = priv->iop->unmap(priv->iop, iova, len, gather); |
c9220fbd | 501 | spin_unlock_irqrestore(&priv->pgtlock, flags); |
0720d1f0 | 502 | |
5009065d | 503 | return len; |
0720d1f0 SM |
504 | } |
505 | ||
506 | static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, | |
bb5547ac | 507 | dma_addr_t va) |
0720d1f0 SM |
508 | { |
509 | struct msm_priv *priv; | |
109bd48e S |
510 | struct msm_iommu_dev *iommu; |
511 | struct msm_iommu_ctx_dev *master; | |
0720d1f0 SM |
512 | unsigned int par; |
513 | unsigned long flags; | |
0720d1f0 | 514 | phys_addr_t ret = 0; |
0720d1f0 SM |
515 | |
516 | spin_lock_irqsave(&msm_iommu_lock, flags); | |
517 | ||
3e116c3c | 518 | priv = to_msm_priv(domain); |
109bd48e S |
519 | iommu = list_first_entry(&priv->list_attached, |
520 | struct msm_iommu_dev, dom_node); | |
0720d1f0 | 521 | |
109bd48e S |
522 | if (list_empty(&iommu->ctx_list)) |
523 | goto fail; | |
0720d1f0 | 524 | |
109bd48e S |
525 | master = list_first_entry(&iommu->ctx_list, |
526 | struct msm_iommu_ctx_dev, list); | |
527 | if (!master) | |
528 | goto fail; | |
0720d1f0 | 529 | |
109bd48e | 530 | ret = __enable_clocks(iommu); |
41f3f513 SM |
531 | if (ret) |
532 | goto fail; | |
533 | ||
0720d1f0 | 534 | /* Invalidate context TLB */ |
109bd48e S |
535 | SET_CTX_TLBIALL(iommu->base, master->num, 0); |
536 | SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA); | |
0720d1f0 | 537 | |
109bd48e | 538 | par = GET_PAR(iommu->base, master->num); |
0720d1f0 SM |
539 | |
540 | /* We are dealing with a supersection */ | |
109bd48e | 541 | if (GET_NOFAULT_SS(iommu->base, master->num)) |
0720d1f0 SM |
542 | ret = (par & 0xFF000000) | (va & 0x00FFFFFF); |
543 | else /* Upper 20 bits from PAR, lower 12 from VA */ | |
544 | ret = (par & 0xFFFFF000) | (va & 0x00000FFF); | |
545 | ||
109bd48e | 546 | if (GET_FAULT(iommu->base, master->num)) |
33069739 SM |
547 | ret = 0; |
548 | ||
109bd48e | 549 | __disable_clocks(iommu); |
0720d1f0 SM |
550 | fail: |
551 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | |
552 | return ret; | |
553 | } | |
554 | ||
4480845e | 555 | static bool msm_iommu_capable(enum iommu_cap cap) |
0720d1f0 | 556 | { |
4480845e | 557 | return false; |
0720d1f0 SM |
558 | } |
559 | ||
560 | static void print_ctx_regs(void __iomem *base, int ctx) | |
561 | { | |
562 | unsigned int fsr = GET_FSR(base, ctx); | |
563 | pr_err("FAR = %08x PAR = %08x\n", | |
564 | GET_FAR(base, ctx), GET_PAR(base, ctx)); | |
565 | pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr, | |
566 | (fsr & 0x02) ? "TF " : "", | |
567 | (fsr & 0x04) ? "AFF " : "", | |
568 | (fsr & 0x08) ? "APF " : "", | |
569 | (fsr & 0x10) ? "TLBMF " : "", | |
570 | (fsr & 0x20) ? "HTWDEEF " : "", | |
571 | (fsr & 0x40) ? "HTWSEEF " : "", | |
572 | (fsr & 0x80) ? "MHF " : "", | |
573 | (fsr & 0x10000) ? "SL " : "", | |
574 | (fsr & 0x40000000) ? "SS " : "", | |
575 | (fsr & 0x80000000) ? "MULTI " : ""); | |
576 | ||
577 | pr_err("FSYNR0 = %08x FSYNR1 = %08x\n", | |
578 | GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx)); | |
579 | pr_err("TTBR0 = %08x TTBR1 = %08x\n", | |
580 | GET_TTBR0(base, ctx), GET_TTBR1(base, ctx)); | |
581 | pr_err("SCTLR = %08x ACTLR = %08x\n", | |
582 | GET_SCTLR(base, ctx), GET_ACTLR(base, ctx)); | |
0720d1f0 SM |
583 | } |
584 | ||
f78ebca8 S |
585 | static void insert_iommu_master(struct device *dev, |
586 | struct msm_iommu_dev **iommu, | |
587 | struct of_phandle_args *spec) | |
588 | { | |
4bbe0c7c | 589 | struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev); |
f78ebca8 S |
590 | int sid; |
591 | ||
592 | if (list_empty(&(*iommu)->ctx_list)) { | |
593 | master = kzalloc(sizeof(*master), GFP_ATOMIC); | |
594 | master->of_node = dev->of_node; | |
595 | list_add(&master->list, &(*iommu)->ctx_list); | |
4bbe0c7c | 596 | dev_iommu_priv_set(dev, master); |
f78ebca8 S |
597 | } |
598 | ||
599 | for (sid = 0; sid < master->num_mids; sid++) | |
600 | if (master->mids[sid] == spec->args[0]) { | |
601 | dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n", | |
602 | sid); | |
603 | return; | |
604 | } | |
605 | ||
606 | master->mids[master->num_mids++] = spec->args[0]; | |
607 | } | |
608 | ||
609 | static int qcom_iommu_of_xlate(struct device *dev, | |
610 | struct of_phandle_args *spec) | |
611 | { | |
612 | struct msm_iommu_dev *iommu; | |
613 | unsigned long flags; | |
614 | int ret = 0; | |
615 | ||
616 | spin_lock_irqsave(&msm_iommu_lock, flags); | |
617 | list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) | |
618 | if (iommu->dev->of_node == spec->np) | |
619 | break; | |
620 | ||
621 | if (!iommu || iommu->dev->of_node != spec->np) { | |
622 | ret = -ENODEV; | |
623 | goto fail; | |
624 | } | |
625 | ||
626 | insert_iommu_master(dev, &iommu, spec); | |
627 | fail: | |
628 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | |
629 | ||
630 | return ret; | |
631 | } | |
632 | ||
0720d1f0 SM |
633 | irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id) |
634 | { | |
109bd48e | 635 | struct msm_iommu_dev *iommu = dev_id; |
33069739 | 636 | unsigned int fsr; |
a43d8c10 | 637 | int i, ret; |
0720d1f0 SM |
638 | |
639 | spin_lock(&msm_iommu_lock); | |
640 | ||
109bd48e | 641 | if (!iommu) { |
0720d1f0 SM |
642 | pr_err("Invalid device ID in context interrupt handler\n"); |
643 | goto fail; | |
644 | } | |
645 | ||
0720d1f0 | 646 | pr_err("Unexpected IOMMU page fault!\n"); |
109bd48e | 647 | pr_err("base = %08x\n", (unsigned int)iommu->base); |
0720d1f0 | 648 | |
109bd48e | 649 | ret = __enable_clocks(iommu); |
41f3f513 SM |
650 | if (ret) |
651 | goto fail; | |
652 | ||
109bd48e S |
653 | for (i = 0; i < iommu->ncb; i++) { |
654 | fsr = GET_FSR(iommu->base, i); | |
0720d1f0 SM |
655 | if (fsr) { |
656 | pr_err("Fault occurred in context %d.\n", i); | |
657 | pr_err("Interesting registers:\n"); | |
109bd48e S |
658 | print_ctx_regs(iommu->base, i); |
659 | SET_FSR(iommu->base, i, 0x4000000F); | |
0720d1f0 SM |
660 | } |
661 | } | |
109bd48e | 662 | __disable_clocks(iommu); |
0720d1f0 SM |
663 | fail: |
664 | spin_unlock(&msm_iommu_lock); | |
665 | return 0; | |
666 | } | |
667 | ||
f78ebca8 | 668 | static struct iommu_ops msm_iommu_ops = { |
4480845e | 669 | .capable = msm_iommu_capable, |
3e116c3c JR |
670 | .domain_alloc = msm_iommu_domain_alloc, |
671 | .domain_free = msm_iommu_domain_free, | |
0720d1f0 SM |
672 | .attach_dev = msm_iommu_attach_dev, |
673 | .detach_dev = msm_iommu_detach_dev, | |
674 | .map = msm_iommu_map, | |
675 | .unmap = msm_iommu_unmap, | |
e953f7f2 WD |
676 | /* |
677 | * Nothing is needed here, the barrier to guarantee | |
678 | * completion of the tlb sync operation is implicitly | |
679 | * taken care when the iommu client does a writel before | |
680 | * kick starting the other master. | |
681 | */ | |
682 | .iotlb_sync = NULL, | |
0720d1f0 | 683 | .iova_to_phys = msm_iommu_iova_to_phys, |
dea74f1c JR |
684 | .probe_device = msm_iommu_probe_device, |
685 | .release_device = msm_iommu_release_device, | |
ce2eb8f4 | 686 | .device_group = generic_device_group, |
83427275 | 687 | .pgsize_bitmap = MSM_IOMMU_PGSIZES, |
f78ebca8 | 688 | .of_xlate = qcom_iommu_of_xlate, |
0720d1f0 SM |
689 | }; |
690 | ||
f7f125ef S |
691 | static int msm_iommu_probe(struct platform_device *pdev) |
692 | { | |
693 | struct resource *r; | |
42df43b3 | 694 | resource_size_t ioaddr; |
f7f125ef S |
695 | struct msm_iommu_dev *iommu; |
696 | int ret, par, val; | |
697 | ||
698 | iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL); | |
699 | if (!iommu) | |
700 | return -ENODEV; | |
701 | ||
702 | iommu->dev = &pdev->dev; | |
703 | INIT_LIST_HEAD(&iommu->ctx_list); | |
704 | ||
705 | iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk"); | |
706 | if (IS_ERR(iommu->pclk)) { | |
707 | dev_err(iommu->dev, "could not get smmu_pclk\n"); | |
708 | return PTR_ERR(iommu->pclk); | |
709 | } | |
710 | ||
711 | ret = clk_prepare(iommu->pclk); | |
712 | if (ret) { | |
713 | dev_err(iommu->dev, "could not prepare smmu_pclk\n"); | |
714 | return ret; | |
715 | } | |
716 | ||
717 | iommu->clk = devm_clk_get(iommu->dev, "iommu_clk"); | |
718 | if (IS_ERR(iommu->clk)) { | |
719 | dev_err(iommu->dev, "could not get iommu_clk\n"); | |
720 | clk_unprepare(iommu->pclk); | |
721 | return PTR_ERR(iommu->clk); | |
722 | } | |
723 | ||
724 | ret = clk_prepare(iommu->clk); | |
725 | if (ret) { | |
726 | dev_err(iommu->dev, "could not prepare iommu_clk\n"); | |
727 | clk_unprepare(iommu->pclk); | |
728 | return ret; | |
729 | } | |
730 | ||
731 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
732 | iommu->base = devm_ioremap_resource(iommu->dev, r); | |
733 | if (IS_ERR(iommu->base)) { | |
734 | dev_err(iommu->dev, "could not get iommu base\n"); | |
735 | ret = PTR_ERR(iommu->base); | |
736 | goto fail; | |
737 | } | |
42df43b3 | 738 | ioaddr = r->start; |
f7f125ef S |
739 | |
740 | iommu->irq = platform_get_irq(pdev, 0); | |
741 | if (iommu->irq < 0) { | |
f7f125ef S |
742 | ret = -ENODEV; |
743 | goto fail; | |
744 | } | |
745 | ||
746 | ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val); | |
747 | if (ret) { | |
748 | dev_err(iommu->dev, "could not get ncb\n"); | |
749 | goto fail; | |
750 | } | |
751 | iommu->ncb = val; | |
752 | ||
753 | msm_iommu_reset(iommu->base, iommu->ncb); | |
754 | SET_M(iommu->base, 0, 1); | |
755 | SET_PAR(iommu->base, 0, 0); | |
756 | SET_V2PCFG(iommu->base, 0, 1); | |
757 | SET_V2PPR(iommu->base, 0, 0); | |
758 | par = GET_PAR(iommu->base, 0); | |
759 | SET_V2PCFG(iommu->base, 0, 0); | |
760 | SET_M(iommu->base, 0, 0); | |
761 | ||
762 | if (!par) { | |
763 | pr_err("Invalid PAR value detected\n"); | |
764 | ret = -ENODEV; | |
765 | goto fail; | |
766 | } | |
767 | ||
768 | ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL, | |
769 | msm_iommu_fault_handler, | |
770 | IRQF_ONESHOT | IRQF_SHARED, | |
771 | "msm_iommu_secure_irpt_handler", | |
772 | iommu); | |
773 | if (ret) { | |
774 | pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret); | |
775 | goto fail; | |
776 | } | |
777 | ||
778 | list_add(&iommu->dev_node, &qcom_iommu_devices); | |
42df43b3 JR |
779 | |
780 | ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL, | |
781 | "msm-smmu.%pa", &ioaddr); | |
782 | if (ret) { | |
783 | pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr); | |
784 | goto fail; | |
785 | } | |
786 | ||
787 | iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops); | |
788 | iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode); | |
789 | ||
790 | ret = iommu_device_register(&iommu->iommu); | |
791 | if (ret) { | |
792 | pr_err("Could not register msm-smmu at %pa\n", &ioaddr); | |
793 | goto fail; | |
794 | } | |
795 | ||
892d7aad RM |
796 | bus_set_iommu(&platform_bus_type, &msm_iommu_ops); |
797 | ||
f7f125ef S |
798 | pr_info("device mapped at %p, irq %d with %d ctx banks\n", |
799 | iommu->base, iommu->irq, iommu->ncb); | |
800 | ||
801 | return ret; | |
802 | fail: | |
803 | clk_unprepare(iommu->clk); | |
804 | clk_unprepare(iommu->pclk); | |
805 | return ret; | |
806 | } | |
807 | ||
808 | static const struct of_device_id msm_iommu_dt_match[] = { | |
809 | { .compatible = "qcom,apq8064-iommu" }, | |
810 | {} | |
811 | }; | |
812 | ||
813 | static int msm_iommu_remove(struct platform_device *pdev) | |
814 | { | |
815 | struct msm_iommu_dev *iommu = platform_get_drvdata(pdev); | |
816 | ||
817 | clk_unprepare(iommu->clk); | |
818 | clk_unprepare(iommu->pclk); | |
819 | return 0; | |
820 | } | |
821 | ||
822 | static struct platform_driver msm_iommu_driver = { | |
823 | .driver = { | |
824 | .name = "msm_iommu", | |
825 | .of_match_table = msm_iommu_dt_match, | |
826 | }, | |
827 | .probe = msm_iommu_probe, | |
828 | .remove = msm_iommu_remove, | |
829 | }; | |
830 | ||
831 | static int __init msm_iommu_driver_init(void) | |
832 | { | |
833 | int ret; | |
834 | ||
835 | ret = platform_driver_register(&msm_iommu_driver); | |
836 | if (ret != 0) | |
837 | pr_err("Failed to register IOMMU driver\n"); | |
838 | ||
839 | return ret; | |
840 | } | |
f7f125ef | 841 | subsys_initcall(msm_iommu_driver_init); |
f7f125ef | 842 |