Commit | Line | Data |
---|---|---|
08dbd0f8 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
41f3f513 | 2 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. |
a007dd51 PG |
3 | * |
4 | * Author: Stepan Moskovchenko <stepanm@codeaurora.org> | |
0720d1f0 SM |
5 | */ |
6 | ||
7 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
8 | #include <linux/kernel.h> | |
a007dd51 | 9 | #include <linux/init.h> |
0720d1f0 SM |
10 | #include <linux/platform_device.h> |
11 | #include <linux/errno.h> | |
12 | #include <linux/io.h> | |
b77cf11f | 13 | #include <linux/io-pgtable.h> |
0720d1f0 SM |
14 | #include <linux/interrupt.h> |
15 | #include <linux/list.h> | |
16 | #include <linux/spinlock.h> | |
17 | #include <linux/slab.h> | |
18 | #include <linux/iommu.h> | |
41f3f513 | 19 | #include <linux/clk.h> |
f7f125ef | 20 | #include <linux/err.h> |
0720d1f0 SM |
21 | |
22 | #include <asm/cacheflush.h> | |
87dfb311 | 23 | #include <linux/sizes.h> |
0720d1f0 | 24 | |
0b559df5 SB |
25 | #include "msm_iommu_hw-8xxx.h" |
26 | #include "msm_iommu.h" | |
0720d1f0 | 27 | |
100832c9 SM |
28 | #define MRC(reg, processor, op1, crn, crm, op2) \ |
29 | __asm__ __volatile__ ( \ | |
30 | " mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \ | |
31 | : "=r" (reg)) | |
32 | ||
83427275 OBC |
33 | /* bitmap of the page sizes currently supported */ |
34 | #define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M) | |
35 | ||
c4e0f3b2 | 36 | static DEFINE_SPINLOCK(msm_iommu_lock); |
109bd48e | 37 | static LIST_HEAD(qcom_iommu_devices); |
c9220fbd | 38 | static struct iommu_ops msm_iommu_ops; |
0720d1f0 SM |
/* Per-domain private state; one instance backs each iommu_domain. */
struct msm_priv {
	struct list_head list_attached;	/* msm_iommu_dev's attached to this domain */
	struct iommu_domain domain;	/* embedded core domain; see to_msm_priv() */
	struct io_pgtable_cfg cfg;	/* io-pgtable configuration (filled in msm_iommu_domain_config()) */
	struct io_pgtable_ops *iop;	/* page-table ops from alloc_io_pgtable_ops() */
	struct device *dev;		/* device last attached to this domain */
	spinlock_t pgtlock; /* pagetable lock */
};
3e116c3c JR |
49 | static struct msm_priv *to_msm_priv(struct iommu_domain *dom) |
50 | { | |
51 | return container_of(dom, struct msm_priv, domain); | |
52 | } | |
53 | ||
109bd48e | 54 | static int __enable_clocks(struct msm_iommu_dev *iommu) |
41f3f513 SM |
55 | { |
56 | int ret; | |
57 | ||
109bd48e | 58 | ret = clk_enable(iommu->pclk); |
41f3f513 SM |
59 | if (ret) |
60 | goto fail; | |
61 | ||
109bd48e S |
62 | if (iommu->clk) { |
63 | ret = clk_enable(iommu->clk); | |
41f3f513 | 64 | if (ret) |
109bd48e | 65 | clk_disable(iommu->pclk); |
41f3f513 SM |
66 | } |
67 | fail: | |
68 | return ret; | |
69 | } | |
70 | ||
/* Undo __enable_clocks(): core clock first, then the interface clock. */
static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}

/*
 * Put the IOMMU global registers and all @ncb context banks into a
 * known disabled state.  Caller must have the clocks enabled.
 */
static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	/* Global state: disable translation, interrupts and test modes. */
	SET_RPUE(base, 0);
	SET_RPUEIE(base, 0);
	SET_ESRRESTORE(base, 0);
	SET_TBE(base, 0);
	SET_CR(base, 0);
	SET_SPDMBE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_TLBRSW(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_RPU_ACR(base, 0);
	SET_TLBLKCRWE(base, 1);

	/* Per-context state: clear control/translation regs, flush TLB. */
	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}

c9220fbd | 117 | static void __flush_iotlb(void *cookie) |
0720d1f0 | 118 | { |
c9220fbd | 119 | struct msm_priv *priv = cookie; |
109bd48e S |
120 | struct msm_iommu_dev *iommu = NULL; |
121 | struct msm_iommu_ctx_dev *master; | |
33069739 | 122 | int ret = 0; |
109bd48e | 123 | |
c9220fbd S |
124 | list_for_each_entry(iommu, &priv->list_attached, dom_node) { |
125 | ret = __enable_clocks(iommu); | |
126 | if (ret) | |
127 | goto fail; | |
0720d1f0 | 128 | |
c9220fbd S |
129 | list_for_each_entry(master, &iommu->ctx_list, list) |
130 | SET_CTX_TLBIALL(iommu->base, master->num, 0); | |
0720d1f0 | 131 | |
c9220fbd | 132 | __disable_clocks(iommu); |
f6f41eb9 | 133 | } |
c9220fbd S |
134 | fail: |
135 | return; | |
136 | } | |
137 | ||
/*
 * Invalidate the TLB entries covering [iova, iova + size) on every
 * context bank of every IOMMU attached to the domain, one @granule
 * at a time, tagging each invalidation with the bank's ASID.
 *
 * NOTE(review): the do/while assumes @size is a non-zero multiple of
 * @granule (temp_size would otherwise wrap past zero) and temp_size is
 * an int while @size is size_t -- confirm callers guarantee this.
 */
static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;
	int temp_size;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				/* VA in the upper bits, context ASID below. */
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}

fail:
	return;
}

/* io-pgtable tlb_flush_walk hook: non-leaf (table) range invalidation. */
static void __flush_iotlb_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, false, cookie);
}

/* io-pgtable tlb_add_page hook: invalidate a single leaf entry. */
static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, granule, granule, true, cookie);
}

/* TLB maintenance callbacks handed to the io-pgtable layer. */
static const struct iommu_flush_ops msm_iommu_flush_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_flush_walk = __flush_iotlb_walk,
	.tlb_add_page = __flush_iotlb_page,
};

109bd48e S |
188 | static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end) |
189 | { | |
190 | int idx; | |
191 | ||
192 | do { | |
193 | idx = find_next_zero_bit(map, end, start); | |
194 | if (idx == end) | |
195 | return -ENOSPC; | |
196 | } while (test_and_set_bit(idx, map)); | |
197 | ||
198 | return idx; | |
199 | } | |
200 | ||
/* Release a context-bank index previously claimed by msm_iommu_alloc_ctx(). */
static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/*
 * Route every master ID (MID) served by @master to its context bank
 * and program non-secure defaults for each stream.
 */
static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set MID associated with this context bank to 0*/
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}

/*
 * Clear a single context bank back to its disabled state and
 * invalidate its TLB entries.  Caller must hold the clocks enabled.
 */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}

/*
 * Program context bank @ctx with the domain's ARMv7 short-descriptor
 * page-table configuration (from priv->cfg) and enable translation.
 * Caller must hold the clocks enabled.
 */
static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	/* Start from a clean context state. */
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	/* Point the walker at the io-pgtable-built tables (TTBR0 only). */
	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
	SET_TTBR1(base, ctx, 0);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

3e116c3c | 305 | static struct iommu_domain *msm_iommu_domain_alloc(unsigned type) |
0720d1f0 | 306 | { |
3e116c3c | 307 | struct msm_priv *priv; |
0720d1f0 | 308 | |
3e116c3c JR |
309 | if (type != IOMMU_DOMAIN_UNMANAGED) |
310 | return NULL; | |
311 | ||
312 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | |
0720d1f0 SM |
313 | if (!priv) |
314 | goto fail_nomem; | |
315 | ||
316 | INIT_LIST_HEAD(&priv->list_attached); | |
4be6a290 | 317 | |
3e116c3c JR |
318 | priv->domain.geometry.aperture_start = 0; |
319 | priv->domain.geometry.aperture_end = (1ULL << 32) - 1; | |
320 | priv->domain.geometry.force_aperture = true; | |
4be6a290 | 321 | |
3e116c3c | 322 | return &priv->domain; |
0720d1f0 SM |
323 | |
324 | fail_nomem: | |
325 | kfree(priv); | |
3e116c3c | 326 | return NULL; |
0720d1f0 SM |
327 | } |
328 | ||
/* Free a domain allocated by msm_iommu_domain_alloc(). */
static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	/* Serialize against attach/detach walking the device lists. */
	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

/*
 * Build the domain's ARMv7 short-descriptor io-pgtable.  Fills in
 * priv->cfg/priv->iop and narrows the global ops' pgsize_bitmap to
 * what the pgtable code actually supports.  Returns 0 or -EINVAL.
 */
static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,		/* 32-bit input (IOVA) space */
		.oas = 32,		/* 32-bit output (PA) space */
		.tlb = &msm_iommu_flush_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	/* Propagate the page sizes the pgtable code settled on. */
	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}

42df43b3 JR |
363 | /* Must be called under msm_iommu_lock */ |
364 | static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev) | |
365 | { | |
366 | struct msm_iommu_dev *iommu, *ret = NULL; | |
367 | struct msm_iommu_ctx_dev *master; | |
368 | ||
369 | list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) { | |
370 | master = list_first_entry(&iommu->ctx_list, | |
371 | struct msm_iommu_ctx_dev, | |
372 | list); | |
373 | if (master->of_node == dev->of_node) { | |
374 | ret = iommu; | |
375 | break; | |
376 | } | |
377 | } | |
378 | ||
379 | return ret; | |
380 | } | |
381 | ||
dea74f1c | 382 | static struct iommu_device *msm_iommu_probe_device(struct device *dev) |
42df43b3 JR |
383 | { |
384 | struct msm_iommu_dev *iommu; | |
385 | unsigned long flags; | |
42df43b3 JR |
386 | |
387 | spin_lock_irqsave(&msm_iommu_lock, flags); | |
42df43b3 | 388 | iommu = find_iommu_for_dev(dev); |
37952146 NC |
389 | spin_unlock_irqrestore(&msm_iommu_lock, flags); |
390 | ||
dea74f1c JR |
391 | if (!iommu) |
392 | return ERR_PTR(-ENODEV); | |
ce2eb8f4 | 393 | |
dea74f1c | 394 | return &iommu->iommu; |
42df43b3 JR |
395 | } |
396 | ||
/*
 * Attach @dev to @domain: build the domain's page table, then for the
 * IOMMU serving @dev allocate and program a context bank for each of
 * its masters.
 *
 * NOTE(review): the return value of msm_iommu_domain_config() is
 * ignored -- a pgtable allocation failure would go unnoticed here.
 * NOTE(review): IS_ERR_VALUE() is applied to master->num, which
 * receives a plain -ENOSPC from msm_iommu_alloc_ctx() -- confirm
 * master->num's type makes this check reliable.
 */
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	msm_iommu_domain_config(priv);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			/* Claim and program a context bank per master. */
			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

/*
 * Detach @dev from its current domain: free the domain's page table,
 * then release and reset every context bank of every attached IOMMU.
 */
static void msm_iommu_set_platform_dma(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret;

	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

c9220fbd | 473 | static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, |
8b35cdcf RM |
474 | phys_addr_t pa, size_t pgsize, size_t pgcount, |
475 | int prot, gfp_t gfp, size_t *mapped) | |
0720d1f0 | 476 | { |
c9220fbd | 477 | struct msm_priv *priv = to_msm_priv(domain); |
0720d1f0 | 478 | unsigned long flags; |
c9220fbd | 479 | int ret; |
0720d1f0 | 480 | |
c9220fbd | 481 | spin_lock_irqsave(&priv->pgtlock, flags); |
8b35cdcf RM |
482 | ret = priv->iop->map_pages(priv->iop, iova, pa, pgsize, pgcount, prot, |
483 | GFP_ATOMIC, mapped); | |
c9220fbd | 484 | spin_unlock_irqrestore(&priv->pgtlock, flags); |
0720d1f0 | 485 | |
0720d1f0 SM |
486 | return ret; |
487 | } | |
488 | ||
/* iotlb_sync_map hook: flush the freshly-mapped range in 4K steps. */
static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct msm_priv *priv = to_msm_priv(domain);

	__flush_iotlb_range(iova, size, SZ_4K, false, priv);
}

c9220fbd | 497 | static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova, |
8b35cdcf RM |
498 | size_t pgsize, size_t pgcount, |
499 | struct iommu_iotlb_gather *gather) | |
0720d1f0 | 500 | { |
c9220fbd | 501 | struct msm_priv *priv = to_msm_priv(domain); |
0720d1f0 | 502 | unsigned long flags; |
8b35cdcf | 503 | size_t ret; |
0720d1f0 | 504 | |
c9220fbd | 505 | spin_lock_irqsave(&priv->pgtlock, flags); |
8b35cdcf | 506 | ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather); |
c9220fbd | 507 | spin_unlock_irqrestore(&priv->pgtlock, flags); |
0720d1f0 | 508 | |
8b35cdcf | 509 | return ret; |
0720d1f0 SM |
510 | } |
511 | ||
/*
 * Translate @va to a physical address by asking the hardware: kick a
 * V2P (virtual-to-physical) probe on the first attached IOMMU's first
 * context bank and read the result from PAR.  Returns 0 on fault or
 * when no master is attached.
 *
 * NOTE(review): the int result of __enable_clocks() is stored into the
 * phys_addr_t 'ret' and returned on failure -- callers would see a
 * negative errno disguised as a physical address; confirm intended.
 */
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);
	if (!master)
		goto fail;

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

/*
 * Dump the fault-related registers of context bank @ctx, decoding the
 * FSR status bits.  Caller must hold the clocks enabled.
 */
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

/*
 * Record the stream ID from @spec on @dev's master, allocating the
 * master on first use and linking it into the IOMMU's context list.
 * Called under msm_iommu_lock (hence GFP_ATOMIC).  Returns 0 or -ENOMEM.
 *
 * NOTE(review): the duplicate-SID warning prints the loop index 'sid'
 * rather than the repeated stream ID spec->args[0] -- confirm intended.
 */
static int insert_iommu_master(struct device *dev,
			       struct msm_iommu_dev **iommu,
			       struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		if (!master) {
			dev_err(dev, "Failed to allocate iommu_master\n");
			return -ENOMEM;
		}
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev_iommu_priv_set(dev, master);
	}

	/* Ignore a stream ID we have already recorded. */
	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
				 sid);
			return 0;
		}

	master->mids[master->num_mids++] = spec->args[0];
	return 0;
}

615 | static int qcom_iommu_of_xlate(struct device *dev, | |
616 | struct of_phandle_args *spec) | |
617 | { | |
8b9ad480 | 618 | struct msm_iommu_dev *iommu = NULL, *iter; |
f78ebca8 S |
619 | unsigned long flags; |
620 | int ret = 0; | |
621 | ||
622 | spin_lock_irqsave(&msm_iommu_lock, flags); | |
8b9ad480 XT |
623 | list_for_each_entry(iter, &qcom_iommu_devices, dev_node) { |
624 | if (iter->dev->of_node == spec->np) { | |
625 | iommu = iter; | |
f78ebca8 | 626 | break; |
8b9ad480 XT |
627 | } |
628 | } | |
f78ebca8 | 629 | |
8b9ad480 | 630 | if (!iommu) { |
f78ebca8 S |
631 | ret = -ENODEV; |
632 | goto fail; | |
633 | } | |
634 | ||
bb5bdc5a | 635 | ret = insert_iommu_master(dev, &iommu, spec); |
f78ebca8 S |
636 | fail: |
637 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | |
638 | ||
639 | return ret; | |
640 | } | |
641 | ||
/*
 * Threaded IRQ handler for context faults: scan every context bank,
 * dump the registers of any faulting bank, then clear its FSR.
 * Always returns 0 (the IRQ is requested with IRQF_ONESHOT).
 *
 * NOTE(review): iommu->base (an __iomem pointer) is cast to unsigned
 * int for printing -- %p would be the portable form; confirm.
 */
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int)iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			/* Write-1-to-clear the fault status bits. */
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

/*
 * Driver's iommu_ops.  Non-const because msm_iommu_domain_config()
 * rewrites pgsize_bitmap with what the io-pgtable code supports.
 */
static struct iommu_ops msm_iommu_ops = {
	.domain_alloc = msm_iommu_domain_alloc,
	.probe_device = msm_iommu_probe_device,
	.device_group = generic_device_group,
	.set_platform_dma_ops = msm_iommu_set_platform_dma,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = msm_iommu_attach_dev,
		.map_pages = msm_iommu_map,
		.unmap_pages = msm_iommu_unmap,
		/*
		 * Nothing is needed here, the barrier to guarantee
		 * completion of the tlb sync operation is implicitly
		 * taken care when the iommu client does a writel before
		 * kick starting the other master.
		 */
		.iotlb_sync = NULL,
		.iotlb_sync_map = msm_iommu_sync_map,
		.iova_to_phys = msm_iommu_iova_to_phys,
		.free = msm_iommu_domain_free,
	}
};

/*
 * Platform probe: acquire clocks, map registers, sanity-check the
 * hardware with a V2P probe, wire up the fault IRQ, and register the
 * instance with the IOMMU core.  On any failure after clk_prepare(),
 * the 'fail' path unprepares both clocks.
 *
 * NOTE(review): a negative platform_get_irq() result is replaced by
 * -ENODEV rather than propagated (losing e.g. -EPROBE_DEFER), and
 * iommu->base is printed with %p from pr_info -- confirm both intended.
 */
static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENODEV;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk))
		return dev_err_probe(iommu->dev, PTR_ERR(iommu->pclk),
				     "could not get smmu_pclk\n");

	ret = clk_prepare(iommu->pclk);
	if (ret)
		return dev_err_probe(iommu->dev, ret,
				     "could not prepare smmu_pclk\n");

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		clk_unprepare(iommu->pclk);
		return dev_err_probe(iommu->dev, PTR_ERR(iommu->clk),
				     "could not get iommu_clk\n");
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		clk_unprepare(iommu->pclk);
		return dev_err_probe(iommu->dev, ret, "could not prepare iommu_clk\n");
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		ret = dev_err_probe(iommu->dev, PTR_ERR(iommu->base), "could not get iommu base\n");
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;

	/* Probe the hardware: a V2P lookup must yield a non-zero PAR. */
	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;
fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}

/* Device-tree match table: this driver binds to the APQ8064 IOMMU. */
static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};

/*
 * Platform remove: unprepare the clocks prepared in probe (devm
 * handles the allocations, mapping and IRQ).
 */
static void msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
}

/* Built-in platform driver; no module exit path. */
static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name = "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe = msm_iommu_probe,
	.remove_new = msm_iommu_remove,
};
builtin_platform_driver(msm_iommu_driver);