Commit | Line | Data |
---|---|---|
0720d1f0 SM |
1 | /* Copyright (c) 2010, Code Aurora Forum. All rights reserved. |
2 | * | |
3 | * This program is free software; you can redistribute it and/or modify | |
4 | * it under the terms of the GNU General Public License version 2 and | |
5 | * only version 2 as published by the Free Software Foundation. | |
6 | * | |
7 | * This program is distributed in the hope that it will be useful, | |
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | * GNU General Public License for more details. | |
11 | * | |
12 | * You should have received a copy of the GNU General Public License | |
13 | * along with this program; if not, write to the Free Software | |
14 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | |
15 | * 02110-1301, USA. | |
16 | */ | |
17 | ||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
19 | #include <linux/kernel.h> | |
20 | #include <linux/module.h> | |
21 | #include <linux/platform_device.h> | |
22 | #include <linux/errno.h> | |
23 | #include <linux/io.h> | |
24 | #include <linux/interrupt.h> | |
25 | #include <linux/list.h> | |
26 | #include <linux/spinlock.h> | |
27 | #include <linux/slab.h> | |
28 | #include <linux/iommu.h> | |
29 | ||
30 | #include <asm/cacheflush.h> | |
31 | #include <asm/sizes.h> | |
32 | ||
33 | #include <mach/iommu_hw-8xxx.h> | |
34 | #include <mach/iommu.h> | |
35 | ||
100832c9 SM |
/*
 * Read an ARM CP15 coprocessor register into @reg.
 * Expands to: mrc <processor>, <op1>, %0, <crn>, <crm>, <op2>
 */
#define MRC(reg, processor, op1, crn, crm, op2) \
__asm__ __volatile__ ( \
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

/* TEX remap registers: Primary Region Remap / Normal Memory Remap */
#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)

/*
 * Maps MSM_IOMMU_ATTR_* cache policy values to TEX remap class indices;
 * filled in by setup_iommu_tex_classes() at init time.
 */
static int msm_iommu_tex_class[4];

/* Serializes all IOMMU register access and page-table manipulation. */
DEFINE_SPINLOCK(msm_iommu_lock);

/*
 * Per-domain state: the 16K first-level page table and the list of
 * attached context banks (linked via msm_iommu_ctx_drvdata.attached_elm).
 */
struct msm_priv {
	unsigned long *pgtable;
	struct list_head list_attached;
};
52 | ||
33069739 | 53 | static int __flush_iotlb(struct iommu_domain *domain) |
0720d1f0 SM |
54 | { |
55 | struct msm_priv *priv = domain->priv; | |
56 | struct msm_iommu_drvdata *iommu_drvdata; | |
57 | struct msm_iommu_ctx_drvdata *ctx_drvdata; | |
33069739 | 58 | int ret = 0; |
0720d1f0 SM |
59 | #ifndef CONFIG_IOMMU_PGTABLES_L2 |
60 | unsigned long *fl_table = priv->pgtable; | |
61 | int i; | |
62 | ||
f6f41eb9 SM |
63 | if (!list_empty(&priv->list_attached)) { |
64 | dmac_flush_range(fl_table, fl_table + SZ_16K); | |
0720d1f0 | 65 | |
f6f41eb9 SM |
66 | for (i = 0; i < NUM_FL_PTE; i++) |
67 | if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) { | |
68 | void *sl_table = __va(fl_table[i] & | |
69 | FL_BASE_MASK); | |
70 | dmac_flush_range(sl_table, sl_table + SZ_4K); | |
71 | } | |
72 | } | |
0720d1f0 SM |
73 | #endif |
74 | ||
75 | list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) { | |
76 | if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent) | |
77 | BUG(); | |
78 | ||
79 | iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); | |
80 | SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0); | |
81 | } | |
33069739 SM |
82 | |
83 | return ret; | |
0720d1f0 SM |
84 | } |
85 | ||
/*
 * Program every register of translation context @ctx back to zero.
 * SCTLR = 0 clears the MMU-enable bit (disabling translation for the
 * context) and CTX_TLBIALL invalidates its TLB entries.
 * Caller must hold msm_iommu_lock.
 */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	SET_CONTEXTIDR(base, ctx, 0);
}
110 | ||
/*
 * Initialize translation context @ctx to translate through the
 * first-level page table at physical address @pgtable.  The table must
 * be 16KB-aligned: only pgtable >> 14 is programmed into TTBR0.
 * Enables the context MMU as the final step.
 * Caller must hold msm_iommu_lock.
 */
static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
{
	unsigned int prrr, nmrr;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, 0);
	SET_TTBR0_PA(base, ctx, (pgtable >> 14));

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Mirror the CPU's current TEX remap attributes into the IOMMU so
	 * that PTE attribute encodings mean the same thing on both sides. */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

#ifdef CONFIG_IOMMU_PGTABLES_L2
	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	SET_TTBR0_SH(base, ctx, 1);
	SET_TTBR1_SH(base, ctx, 1);

	SET_TTBR0_NOS(base, ctx, 1);
	SET_TTBR1_NOS(base, ctx, 1);

	SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR0_IRGNL(base, ctx, 1);

	SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR1_IRGNL(base, ctx, 1);

	SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
	SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
#endif

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}
178 | ||
179 | static int msm_iommu_domain_init(struct iommu_domain *domain) | |
180 | { | |
181 | struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL); | |
182 | ||
183 | if (!priv) | |
184 | goto fail_nomem; | |
185 | ||
186 | INIT_LIST_HEAD(&priv->list_attached); | |
187 | priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL, | |
188 | get_order(SZ_16K)); | |
189 | ||
190 | if (!priv->pgtable) | |
191 | goto fail_nomem; | |
192 | ||
193 | memset(priv->pgtable, 0, SZ_16K); | |
194 | domain->priv = priv; | |
195 | return 0; | |
196 | ||
197 | fail_nomem: | |
198 | kfree(priv); | |
199 | return -ENOMEM; | |
200 | } | |
201 | ||
/*
 * Tear down @domain: free every second-level table still referenced by
 * the first-level table, then the 16K first-level table, then the
 * private structure.  Does not touch list_attached -- callers are
 * expected to have detached all devices first.
 */
static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	int i;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		/* Free each second-level table installed in the FL table */
		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
228 | ||
/*
 * Attach @dev (an IOMMU context device) to @domain: program its context
 * bank with the domain's page table, add it to the domain's attached
 * list and flush the TLB.
 *
 * Returns 0 on success, -EINVAL if driver/platform data is missing,
 * -EBUSY if the context is already attached to a domain.
 */
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* dev is the context device; its parent is the IOMMU device */
	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* A non-empty attached_elm means this context is already in use */
	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	/* Defensive: also reject a duplicate entry in this domain's list */
	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	__program_context(iommu_drvdata->base, ctx_dev->num,
			  __pa(priv->pgtable));

	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ret = __flush_iotlb(domain);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
278 | ||
/*
 * Detach @dev from @domain: flush the TLB, reset (disable) the context
 * bank and remove it from the domain's attached list.  Returns silently
 * on invalid arguments or if the TLB flush fails, leaving the context
 * attached in the failure case.
 */
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	/* Flush while the context is still live and attached */
	ret = __flush_iotlb(domain);
	if (ret)
		goto fail;

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
312 | ||
313 | static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, | |
314 | phys_addr_t pa, int order, int prot) | |
315 | { | |
316 | struct msm_priv *priv; | |
317 | unsigned long flags; | |
318 | unsigned long *fl_table; | |
319 | unsigned long *fl_pte; | |
320 | unsigned long fl_offset; | |
321 | unsigned long *sl_table; | |
322 | unsigned long *sl_pte; | |
323 | unsigned long sl_offset; | |
100832c9 | 324 | unsigned int pgprot; |
0720d1f0 | 325 | size_t len = 0x1000UL << order; |
100832c9 | 326 | int ret = 0, tex, sh; |
0720d1f0 SM |
327 | |
328 | spin_lock_irqsave(&msm_iommu_lock, flags); | |
0720d1f0 | 329 | |
100832c9 SM |
330 | sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0; |
331 | tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK]; | |
332 | ||
333 | if (tex < 0 || tex > NUM_TEX_CLASS - 1) { | |
334 | ret = -EINVAL; | |
335 | goto fail; | |
336 | } | |
337 | ||
338 | priv = domain->priv; | |
0720d1f0 SM |
339 | if (!priv) { |
340 | ret = -EINVAL; | |
341 | goto fail; | |
342 | } | |
343 | ||
344 | fl_table = priv->pgtable; | |
345 | ||
346 | if (len != SZ_16M && len != SZ_1M && | |
347 | len != SZ_64K && len != SZ_4K) { | |
348 | pr_debug("Bad size: %d\n", len); | |
349 | ret = -EINVAL; | |
350 | goto fail; | |
351 | } | |
352 | ||
353 | if (!fl_table) { | |
354 | pr_debug("Null page table\n"); | |
355 | ret = -EINVAL; | |
356 | goto fail; | |
357 | } | |
358 | ||
100832c9 SM |
359 | if (len == SZ_16M || len == SZ_1M) { |
360 | pgprot = sh ? FL_SHARED : 0; | |
361 | pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0; | |
362 | pgprot |= tex & 0x02 ? FL_CACHEABLE : 0; | |
363 | pgprot |= tex & 0x04 ? FL_TEX0 : 0; | |
364 | } else { | |
365 | pgprot = sh ? SL_SHARED : 0; | |
366 | pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0; | |
367 | pgprot |= tex & 0x02 ? SL_CACHEABLE : 0; | |
368 | pgprot |= tex & 0x04 ? SL_TEX0 : 0; | |
369 | } | |
370 | ||
0720d1f0 SM |
371 | fl_offset = FL_OFFSET(va); /* Upper 12 bits */ |
372 | fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ | |
373 | ||
374 | if (len == SZ_16M) { | |
375 | int i = 0; | |
376 | for (i = 0; i < 16; i++) | |
377 | *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION | | |
378 | FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT | | |
100832c9 | 379 | FL_SHARED | pgprot; |
0720d1f0 SM |
380 | } |
381 | ||
382 | if (len == SZ_1M) | |
383 | *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | | |
100832c9 | 384 | FL_TYPE_SECT | FL_SHARED | pgprot; |
0720d1f0 SM |
385 | |
386 | /* Need a 2nd level table */ | |
387 | if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) { | |
388 | unsigned long *sl; | |
294b2dea | 389 | sl = (unsigned long *) __get_free_pages(GFP_ATOMIC, |
0720d1f0 SM |
390 | get_order(SZ_4K)); |
391 | ||
392 | if (!sl) { | |
393 | pr_debug("Could not allocate second level table\n"); | |
394 | ret = -ENOMEM; | |
395 | goto fail; | |
396 | } | |
397 | ||
398 | memset(sl, 0, SZ_4K); | |
399 | *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE); | |
400 | } | |
401 | ||
402 | sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); | |
403 | sl_offset = SL_OFFSET(va); | |
404 | sl_pte = sl_table + sl_offset; | |
405 | ||
406 | ||
407 | if (len == SZ_4K) | |
408 | *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | | |
100832c9 | 409 | SL_SHARED | SL_TYPE_SMALL | pgprot; |
0720d1f0 SM |
410 | |
411 | if (len == SZ_64K) { | |
412 | int i; | |
413 | ||
414 | for (i = 0; i < 16; i++) | |
415 | *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 | | |
100832c9 | 416 | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot; |
0720d1f0 SM |
417 | } |
418 | ||
33069739 | 419 | ret = __flush_iotlb(domain); |
0720d1f0 SM |
420 | fail: |
421 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | |
422 | return ret; | |
423 | } | |
424 | ||
425 | static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, | |
426 | int order) | |
427 | { | |
428 | struct msm_priv *priv; | |
429 | unsigned long flags; | |
430 | unsigned long *fl_table; | |
431 | unsigned long *fl_pte; | |
432 | unsigned long fl_offset; | |
433 | unsigned long *sl_table; | |
434 | unsigned long *sl_pte; | |
435 | unsigned long sl_offset; | |
436 | size_t len = 0x1000UL << order; | |
437 | int i, ret = 0; | |
438 | ||
439 | spin_lock_irqsave(&msm_iommu_lock, flags); | |
440 | ||
441 | priv = domain->priv; | |
442 | ||
443 | if (!priv) { | |
444 | ret = -ENODEV; | |
445 | goto fail; | |
446 | } | |
447 | ||
448 | fl_table = priv->pgtable; | |
449 | ||
450 | if (len != SZ_16M && len != SZ_1M && | |
451 | len != SZ_64K && len != SZ_4K) { | |
452 | pr_debug("Bad length: %d\n", len); | |
453 | ret = -EINVAL; | |
454 | goto fail; | |
455 | } | |
456 | ||
457 | if (!fl_table) { | |
458 | pr_debug("Null page table\n"); | |
459 | ret = -EINVAL; | |
460 | goto fail; | |
461 | } | |
462 | ||
463 | fl_offset = FL_OFFSET(va); /* Upper 12 bits */ | |
464 | fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ | |
465 | ||
466 | if (*fl_pte == 0) { | |
467 | pr_debug("First level PTE is 0\n"); | |
468 | ret = -ENODEV; | |
469 | goto fail; | |
470 | } | |
471 | ||
472 | /* Unmap supersection */ | |
473 | if (len == SZ_16M) | |
474 | for (i = 0; i < 16; i++) | |
475 | *(fl_pte+i) = 0; | |
476 | ||
477 | if (len == SZ_1M) | |
478 | *fl_pte = 0; | |
479 | ||
480 | sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); | |
481 | sl_offset = SL_OFFSET(va); | |
482 | sl_pte = sl_table + sl_offset; | |
483 | ||
484 | if (len == SZ_64K) { | |
485 | for (i = 0; i < 16; i++) | |
486 | *(sl_pte+i) = 0; | |
487 | } | |
488 | ||
489 | if (len == SZ_4K) | |
490 | *sl_pte = 0; | |
491 | ||
492 | if (len == SZ_4K || len == SZ_64K) { | |
493 | int used = 0; | |
494 | ||
495 | for (i = 0; i < NUM_SL_PTE; i++) | |
496 | if (sl_table[i]) | |
497 | used = 1; | |
498 | if (!used) { | |
499 | free_page((unsigned long)sl_table); | |
500 | *fl_pte = 0; | |
501 | } | |
502 | } | |
503 | ||
33069739 | 504 | ret = __flush_iotlb(domain); |
0720d1f0 SM |
505 | fail: |
506 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | |
507 | return ret; | |
508 | } | |
509 | ||
510 | static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, | |
511 | unsigned long va) | |
512 | { | |
513 | struct msm_priv *priv; | |
514 | struct msm_iommu_drvdata *iommu_drvdata; | |
515 | struct msm_iommu_ctx_drvdata *ctx_drvdata; | |
516 | unsigned int par; | |
517 | unsigned long flags; | |
518 | void __iomem *base; | |
519 | phys_addr_t ret = 0; | |
520 | int ctx; | |
521 | ||
522 | spin_lock_irqsave(&msm_iommu_lock, flags); | |
523 | ||
524 | priv = domain->priv; | |
525 | if (list_empty(&priv->list_attached)) | |
526 | goto fail; | |
527 | ||
528 | ctx_drvdata = list_entry(priv->list_attached.next, | |
529 | struct msm_iommu_ctx_drvdata, attached_elm); | |
530 | iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); | |
531 | ||
532 | base = iommu_drvdata->base; | |
533 | ctx = ctx_drvdata->num; | |
534 | ||
535 | /* Invalidate context TLB */ | |
536 | SET_CTX_TLBIALL(base, ctx, 0); | |
537 | SET_V2PPR_VA(base, ctx, va >> V2Pxx_VA_SHIFT); | |
538 | ||
0720d1f0 SM |
539 | par = GET_PAR(base, ctx); |
540 | ||
541 | /* We are dealing with a supersection */ | |
542 | if (GET_NOFAULT_SS(base, ctx)) | |
543 | ret = (par & 0xFF000000) | (va & 0x00FFFFFF); | |
544 | else /* Upper 20 bits from PAR, lower 12 from VA */ | |
545 | ret = (par & 0xFFFFF000) | (va & 0x00000FFF); | |
546 | ||
33069739 SM |
547 | if (GET_FAULT(base, ctx)) |
548 | ret = 0; | |
549 | ||
0720d1f0 SM |
550 | fail: |
551 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | |
552 | return ret; | |
553 | } | |
554 | ||
/*
 * Capability query callback for the generic IOMMU layer.  This driver
 * implements none of the optional capabilities, so the answer is
 * always "not supported" regardless of @domain and @cap.
 */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	/* No optional capability is advertised. */
	return 0;
}
560 | ||
/*
 * Dump the fault-related registers of context @ctx to the kernel log,
 * decoding the individual FSR status bits into short mnemonics.
 * Called from the fault handler with msm_iommu_lock held.
 */
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
587 | ||
588 | irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id) | |
589 | { | |
590 | struct msm_iommu_drvdata *drvdata = dev_id; | |
591 | void __iomem *base; | |
33069739 SM |
592 | unsigned int fsr; |
593 | int ncb, i; | |
0720d1f0 SM |
594 | |
595 | spin_lock(&msm_iommu_lock); | |
596 | ||
597 | if (!drvdata) { | |
598 | pr_err("Invalid device ID in context interrupt handler\n"); | |
599 | goto fail; | |
600 | } | |
601 | ||
602 | base = drvdata->base; | |
603 | ||
0720d1f0 SM |
604 | pr_err("Unexpected IOMMU page fault!\n"); |
605 | pr_err("base = %08x\n", (unsigned int) base); | |
606 | ||
607 | ncb = GET_NCB(base)+1; | |
608 | for (i = 0; i < ncb; i++) { | |
609 | fsr = GET_FSR(base, i); | |
610 | if (fsr) { | |
611 | pr_err("Fault occurred in context %d.\n", i); | |
612 | pr_err("Interesting registers:\n"); | |
613 | print_ctx_regs(base, i); | |
614 | SET_FSR(base, i, 0x4000000F); | |
615 | } | |
616 | } | |
617 | fail: | |
618 | spin_unlock(&msm_iommu_lock); | |
619 | return 0; | |
620 | } | |
621 | ||
/* Callback table registered with the generic IOMMU layer. */
static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap
};
632 | ||
100832c9 SM |
633 | static int __init get_tex_class(int icp, int ocp, int mt, int nos) |
634 | { | |
635 | int i = 0; | |
636 | unsigned int prrr = 0; | |
637 | unsigned int nmrr = 0; | |
638 | int c_icp, c_ocp, c_mt, c_nos; | |
639 | ||
640 | RCP15_PRRR(prrr); | |
641 | RCP15_NMRR(nmrr); | |
642 | ||
643 | for (i = 0; i < NUM_TEX_CLASS; i++) { | |
644 | c_nos = PRRR_NOS(prrr, i); | |
645 | c_mt = PRRR_MT(prrr, i); | |
646 | c_icp = NMRR_ICP(nmrr, i); | |
647 | c_ocp = NMRR_OCP(nmrr, i); | |
648 | ||
649 | if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos) | |
650 | return i; | |
651 | } | |
652 | ||
653 | return -ENODEV; | |
654 | } | |
655 | ||
/*
 * Resolve each MSM_IOMMU_ATTR_* cache attribute to a TEX remap class
 * index based on the CPU's current PRRR/NMRR configuration.
 * NOTE(review): get_tex_class() may return -ENODEV; the negative value
 * is stored as-is and later rejected by the "tex < 0" bounds check in
 * msm_iommu_map().
 */
static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}
670 | ||
/*
 * Driver entry point: compute the TEX class mapping for this CPU, then
 * register the callback table with the IOMMU core.
 */
static int __init msm_iommu_init(void)
{
	setup_iommu_tex_classes();
	register_iommu(&msm_iommu_ops);
	return 0;
}
677 | ||
678 | subsys_initcall(msm_iommu_init); | |
679 | ||
680 | MODULE_LICENSE("GPL v2"); | |
681 | MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>"); |