/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"
#include "io-pgtable.h"

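/* Read a coprocessor register (crn, crm, op2) of 'processor' into 'reg'. */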
#define MRC(reg, processor, op1, crn, crm, op2)			\
__asm__ __volatile__ (						\
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

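/* Per-domain state: the attached IOMMU instances and the io-pgtable backing the domain. */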
struct msm_priv {
	struct list_head list_attached;
	struct iommu_domain domain;
	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;
	struct device *dev;
	spinlock_t pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
	return container_of(dom, struct msm_priv, domain);
}

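/* Enable the interface clock and, when present, the core clock of an IOMMU instance. */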
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
	int ret;

	ret = clk_enable(iommu->pclk);
	if (ret)
		goto fail;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}

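/* Put the global IOMMU space and every context bank into a known, disabled state. */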
static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	SET_RPUE(base, 0);
	SET_RPUEIE(base, 0);
	SET_ESRRESTORE(base, 0);
	SET_TBE(base, 0);
	SET_CR(base, 0);
	SET_SPDMBE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_TLBRSW(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_RPU_ACR(base, 0);
	SET_TLBLKCRWE(base, 1);

	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}

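/* TLB maintenance callbacks handed to the io-pgtable code through msm_iommu_gather_ops. */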
static void __flush_iotlb(void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list)
			SET_CTX_TLBIALL(iommu->base, master->num, 0);

		__disable_clocks(iommu);
	}
fail:
	return;
}

static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;
	int temp_size;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}

fail:
	return;
}

static void __flush_iotlb_sync(void *cookie)
{
	/*
	 * Nothing is needed here, the barrier to guarantee
	 * completion of the tlb sync operation is implicitly
	 * taken care when the iommu client does a writel before
	 * kick starting the other master.
	 */
}

static const struct iommu_gather_ops msm_iommu_gather_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_add_flush = __flush_iotlb_range,
	.tlb_sync = __flush_iotlb_sync,
};

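/* Allocate a free context bank number from the bitmap; -ENOSPC when all banks are in use. */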
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set MID associated with this context bank to 0*/
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}

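/* Reset a single context bank to its power-on (disabled) defaults. */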
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}

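/* Program a context bank with the domain's page tables and translation attributes, then enable it. */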
static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
	SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
	struct msm_priv *priv;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);

	priv->domain.geometry.aperture_start = 0;
	priv->domain.geometry.aperture_end = (1ULL << 32) - 1;
	priv->domain.geometry.force_aperture = true;

	return &priv->domain;

fail_nomem:
	kfree(priv);
	return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

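/* Set up the ARMv7 short-descriptor io-pgtable that backs this domain. */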
static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &msm_iommu_gather_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
	struct msm_iommu_dev *iommu, *ret = NULL;
	struct msm_iommu_ctx_dev *master;

	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = iommu;
			break;
		}
	}

	return ret;
}

static int msm_iommu_add_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	struct iommu_group *group;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	iommu = find_iommu_for_dev(dev);
	if (iommu)
		iommu_device_link(&iommu->iommu, dev);
	else
		ret = -ENODEV;

	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (ret)
		return ret;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void msm_iommu_remove_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	iommu = find_iommu_for_dev(dev);
	if (iommu)
		iommu_device_unlink(&iommu->iommu, dev);

	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	iommu_group_remove_device(dev);
}

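/* Allocate and program a context bank for every master behind the device's IOMMU and attach that IOMMU to the domain. */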
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	msm_iommu_domain_config(priv);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret;

	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

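/* map/unmap simply forward to the io-pgtable ops under the per-domain page-table lock. */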
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->map(priv->iop, iova, pa, len, prot);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t len)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;

	spin_lock_irqsave(&priv->pgtlock, flags);
	len = priv->iop->unmap(priv->iop, iova, len);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return len;
}

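/* Resolve an IOVA to a physical address with a hardware V2P lookup through the first attached context bank. */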
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);
	if (!master)
		goto fail;

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR = %08x PAR = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0 = %08x TTBR1 = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR = %08x ACTLR = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

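/* Record a stream ID (MID) from the device's OF IOMMU specifier against its master descriptor. */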
static void insert_iommu_master(struct device *dev,
				struct msm_iommu_dev **iommu,
				struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev->archdata.iommu = master;
	}

	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
				 sid);
			return;
		}

	master->mids[master->num_mids++] = spec->args[0];
}

static int qcom_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *spec)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
		if (iommu->dev->of_node == spec->np)
			break;

	if (!iommu || iommu->dev->of_node != spec->np) {
		ret = -ENODEV;
		goto fail;
	}

	insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

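/* Context fault handler: dump the registers of every faulting context bank and clear its fault status. */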
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int)iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.capable = msm_iommu_capable,
	.domain_alloc = msm_iommu_domain_alloc,
	.domain_free = msm_iommu_domain_free,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.add_device = msm_iommu_add_device,
	.remove_device = msm_iommu_remove_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
};

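/* Probe one IOMMU instance: acquire clocks, map its registers, sanity-check the hardware and register it with the IOMMU core. */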
static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENODEV;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk)) {
		dev_err(iommu->dev, "could not get smmu_pclk\n");
		return PTR_ERR(iommu->pclk);
	}

	ret = clk_prepare(iommu->pclk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare smmu_pclk\n");
		return ret;
	}

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		dev_err(iommu->dev, "could not get iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return PTR_ERR(iommu->clk);
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return ret;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		dev_err(iommu->dev, "could not get iommu base\n");
		ret = PTR_ERR(iommu->base);
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		dev_err(iommu->dev, "could not get iommu irq\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;

	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&iommu->iommu);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;
fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return 0;
}

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name = "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe = msm_iommu_probe,
	.remove = msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&msm_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register IOMMU driver\n");

	return ret;
}

static void __exit msm_iommu_driver_exit(void)
{
	platform_driver_unregister(&msm_iommu_driver);
}

subsys_initcall(msm_iommu_driver_init);
module_exit(msm_iommu_driver_exit);

static int __init msm_iommu_init(void)
{
	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
	return 0;
}

static int __init msm_iommu_of_setup(struct device_node *np)
{
	msm_iommu_init();
	return 0;
}

IOMMU_OF_DECLARE(msm_iommu_of, "qcom,apq8064-iommu", msm_iommu_of_setup);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");