iommu/ipmmu-vmsa: Add helper functions for "uTLB" registers
[linux-2.6-block.git] drivers/iommu/ipmmu-vmsa.c
// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for Renesas VMSA-compatible IPMMU
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
#endif

#define IPMMU_CTX_MAX		8U
#define IPMMU_CTX_INVALID	-1

#define IPMMU_UTLB_MAX		48U

struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	unsigned int num_utlbs;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
	bool reserved_context;
	bool cache_snoop;
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
	s8 utlb_ctx[IPMMU_UTLB_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	struct mutex mutex;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	return fwspec ? fwspec->iommu_priv : NULL;
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Register Definitions
 */

#define IM_NS_ALIAS_OFFSET		0x800

/* MMU "context" registers */
#define IMCTR				0x0000		/* R-Car Gen2/3 */
#define IMCTR_INTEN			(1 << 2)	/* R-Car Gen2/3 */
#define IMCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMTTBCR				0x0008		/* R-Car Gen2/3 */
#define IMTTBCR_EAE			(1 << 31)	/* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)	/* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)	/* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)	/* R-Car Gen2 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)	/* R-Car Gen3 only */
#define IMTTBCR_SL0_LVL_1		(1 << 4)	/* R-Car Gen2 only */

#define IMBUSCR				0x000c		/* R-Car Gen2 only */
#define IMBUSCR_DVM			(1 << 2)	/* R-Car Gen2 only */
#define IMBUSCR_BUSSEL_MASK		(3 << 0)	/* R-Car Gen2 only */

#define IMTTLBR0			0x0010		/* R-Car Gen2/3 */
#define IMTTUBR0			0x0014		/* R-Car Gen2/3 */

#define IMSTR				0x0020		/* R-Car Gen2/3 */
#define IMSTR_MHIT			(1 << 4)	/* R-Car Gen2/3 */
#define IMSTR_ABORT			(1 << 2)	/* R-Car Gen2/3 */
#define IMSTR_PF			(1 << 1)	/* R-Car Gen2/3 */
#define IMSTR_TF			(1 << 0)	/* R-Car Gen2/3 */

#define IMMAIR0				0x0028		/* R-Car Gen2/3 */

#define IMELAR				0x0030		/* R-Car Gen2/3, IMEAR on R-Car Gen2 */
#define IMEUAR				0x0034		/* R-Car Gen3 only */

/* uTLB registers */
#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)			(0x0300 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))	/* R-Car Gen3 only */
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)	/* R-Car Gen2/3 */
#define IMUCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMUCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)			(0x0308 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))	/* R-Car Gen3 only */

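/*
 * Note the banked uTLB layout: uTLBs 0..31 sit at 0x300 + n * 16, and
 * uTLBs 32..47 (R-Car Gen3 only) continue at 0x600. The IMUCTR() and
 * IMUASID() macros hide the bank split from the rest of the driver.
 */
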
/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

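/*
 * On R-Car Gen3 the IPMMU is split into cache instances in front of the
 * bus masters (leaves) and a main IPMMU-MM instance (the root) that owns
 * the translation contexts; leaf instances locate the root through
 * ipmmu_find_root() above. On R-Car Gen2, and whenever the
 * "renesas,ipmmu-main" property is absent, an instance is its own root.
 */
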
/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
				  unsigned int context_id, unsigned int reg)
{
	return mmu->features->ctx_offset_base +
	       context_id * mmu->features->ctx_offset_stride + reg;
}

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
			  unsigned int context_id, unsigned int reg)
{
	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
			    unsigned int context_id, unsigned int reg, u32 data)
{
	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
}

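/*
 * Both feature tables below use ctx_offset_base = 0 and ctx_offset_stride
 * = 0x40, so context n's copy of a register lives at n * 0x40 + reg; for
 * example, IMCTR for context 2 is accessed at offset 0x80.
 */
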
static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

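/*
 * ipmmu_utlb_reg() below is deliberately a pass-through for now. It gives
 * the uTLB accessors a single place to translate register offsets,
 * presumably so a future SoC that relocates the uTLB register block only
 * needs changes here (an assumption; this commit only adds the helpers).
 */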
static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
{
	return reg;
}

static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
				unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
}

static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
			       unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
					    "TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to? */
	ipmmu_imuasid_write(mmu, utlb, 0);
	/* TODO: Do we need to flush the microTLB? */
	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
				      IMUCTR_FLUSH | IMUCTR_MMUEN);
	mmu->utlb_ctx[utlb] = domain->context_id;
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_imuctr_write(mmu, utlb, 0);
	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_flush(unsigned long iova, size_t size,
			    size_t granule, void *cookie)
{
	ipmmu_tlb_flush_all(cookie);
}

static const struct iommu_flush_ops ipmmu_flush_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_flush_walk = ipmmu_tlb_flush,
	.tlb_flush_leaf = ipmmu_tlb_flush,
};

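/*
 * Range-based invalidation is not implemented: ipmmu_tlb_flush() ignores
 * its iova/size/granule arguments, so both the walk and leaf callbacks
 * fall back to a full context-wide flush via IMCTR_FLUSH.
 */
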
/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else
		ret = -EBUSY;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors and allocate the whole 32-bit VA space to
	 * TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	if (domain->mmu->features->cache_snoop)
		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
		       IMTTBCR_IRGN0_WB_WA;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair[0]);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}

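/*
 * Programming the context registers is split out of
 * ipmmu_domain_init_context() below so that ipmmu_resume_noirq() can
 * re-program every active context after a system suspend.
 */
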
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if either the NStable or the NS bit isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_flush_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.coherent_walk = false;
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	ipmmu_domain_setup_context(domain);
	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	if (!domain->mmu)
		return;

	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	unsigned long iova;
	u32 status;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMELAR);
	if (IS_ENABLED(CONFIG_64BIT))
		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
			    status, iova);

	return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

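/*
 * All contexts of an instance share one interrupt line (probe requests a
 * single IRQ for the root device), so ipmmu_irq() above must walk every
 * active context and check its IMSTR rather than dispatch per context.
 */
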
/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	mutex_init(&domain->mutex);

	return &domain->io_domain;
}

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			kfree(io_domain);
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong; we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size, struct iommu_iotlb_gather *gather)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size, gather);
}

static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
			     struct iommu_iotlb_gather *gather)
{
	ipmmu_flush_iotlb_all(io_domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);

	return 0;
}

static const struct soc_device_attribute soc_rcar_gen3[] = {
	{ .soc_id = "r8a774a1", },
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a7795", },
	{ .soc_id = "r8a7796", },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77970", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a7795", .revision = "ES3.*" },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const char * const rcar_gen3_slave_whitelist[] = {
};

static bool ipmmu_slave_whitelist(struct device *dev)
{
	unsigned int i;

	/*
	 * For R-Car Gen3, use a whitelist to opt in slave devices.
	 * For other SoCs, allow all slave devices.
	 */
	if (!soc_device_match(soc_rcar_gen3))
		return true;

	/* Check whether this R-Car Gen3 SoC can use the IPMMU correctly or not */
	if (!soc_device_match(soc_rcar_gen3_whitelist))
		return false;

	/* Check whether this slave device can work with the IPMMU */
	for (i = 0; i < ARRAY_SIZE(rcar_gen3_slave_whitelist); i++) {
		if (!strcmp(dev_name(dev), rcar_gen3_slave_whitelist[i]))
			return true;
	}

	/* Otherwise, do not allow use of IPMMU */
	return false;
}

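/*
 * Note that rcar_gen3_slave_whitelist[] is empty at this point, so even
 * on whitelisted R-Car Gen3 SoCs no slave device is opted in yet; entries
 * are expected to be added as individual devices get validated.
 */
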
static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	if (!ipmmu_slave_whitelist(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() may be called multiple times */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}

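/*
 * For illustration only (a hypothetical DT fragment, not part of this
 * file): slave devices reference the IPMMU with a single cell naming
 * their uTLB. That cell arrives here as spec->args[0] and is recorded as
 * a fwspec ID, which ipmmu_attach_device() later passes to
 * ipmmu_utlb_enable():
 *
 *	dma0: dma-controller@e6700000 {
 *		...
 *		iommus = <&ipmmu_ds0 0>;
 *	};
 */
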
static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;
	int ret;

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	if (ret < 0) {
		dev_err(dev, "Failed to add device to IPMMU group\n");
		return ret;
	}

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	iommu_group_remove_device(dev);
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static int ipmmu_add_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;
	int ret;

	/*
	 * Only let through devices that have been verified in xlate().
	 */
	if (!mmu)
		return -ENODEV;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) {
		ret = ipmmu_init_arm_mapping(dev);
		if (ret)
			return ret;
	} else {
		group = iommu_group_get_for_dev(dev);
		if (IS_ERR(group))
			return PTR_ERR(group);

		iommu_group_put(group);
	}

	iommu_device_link(&mmu->iommu, dev);
	return 0;
}

static void ipmmu_remove_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

	iommu_device_unlink(&mmu->iommu, dev);
	arm_iommu_detach_device(dev);
	iommu_group_remove_device(dev);
}

static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_flush_iotlb_all,
	.iotlb_sync = ipmmu_iotlb_sync,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device,
	.remove_device = ipmmu_remove_device,
	.device_group = ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_ctx_write(mmu, i, IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.num_utlbs = 32,
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
	.cache_snoop = true,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
};

static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.num_utlbs = 48,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
};

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a774a1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774b1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774c0",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77990",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		/* Terminator */
	},
};

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

	irq = platform_get_irq(pdev, 0);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of the has_cache_leaf_nodes flag or the renesas,ipmmu-main
	 * property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Defer probing until the root device has been registered.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		if (irq < 0) {
			dev_err(&pdev->dev, "no IRQ found\n");
			return irq;
		}

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
		iommu_device_set_fwnode(&mmu->iommu,
					&pdev->dev.of_node->fwnode);

		ret = iommu_device_register(&mmu->iommu);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipmmu_resume_noirq(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	unsigned int i;

	/* Reset root MMU and restore contexts */
	if (ipmmu_is_root(mmu)) {
		ipmmu_device_reset(mmu);

		for (i = 0; i < mmu->num_ctx; i++) {
			if (!mmu->domains[i])
				continue;

			ipmmu_domain_setup_context(mmu->domains[i]);
		}
	}

	/* Re-enable active micro-TLBs */
	for (i = 0; i < mmu->features->num_utlbs; i++) {
		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
			continue;

		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
	}

	return 0;
}

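/*
 * The loop above relies on utlb_ctx[], which ipmmu_utlb_enable() and
 * ipmmu_utlb_disable() keep up to date: it records the context each uTLB
 * was bound to before suspend so the binding can be re-established here.
 */
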
static const struct dev_pm_ops ipmmu_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
#define DEV_PM_OPS	&ipmmu_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
		.pm = DEV_PM_OPS,
	},
	.probe = ipmmu_probe,
	.remove = ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	struct device_node *np;
	static bool setup_done;
	int ret;

	if (setup_done)
		return 0;

	np = of_find_matching_node(NULL, ipmmu_of_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}
subsys_initcall(ipmmu_init);