// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for Renesas VMSA-compatible IPMMU
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * Copyright (C) 2014-2020 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

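/*
 * On 32-bit ARM without CONFIG_IOMMU_DMA the driver hooks into the ARM
 * DMA-IOMMU mapping API below; on all other configurations those calls are
 * stubbed out so the shared code paths still compile.
 */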
#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#endif

#define IPMMU_CTX_MAX		16U
#define IPMMU_CTX_INVALID	-1

#define IPMMU_UTLB_MAX		64U

struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	unsigned int num_utlbs;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
	bool reserved_context;
	bool cache_snoop;
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
	unsigned int utlb_offset_base;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
	s8 utlb_ctx[IPMMU_UTLB_MAX];

	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	struct mutex mutex;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

#define TLB_LOOP_TIMEOUT	100	/* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET		0x800

/* MMU "context" registers */
#define IMCTR				0x0000		/* R-Car Gen2/3 */
#define IMCTR_INTEN			(1 << 2)	/* R-Car Gen2/3 */
#define IMCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMTTBCR				0x0008		/* R-Car Gen2/3 */
#define IMTTBCR_EAE			(1 << 31)	/* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)	/* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)	/* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)	/* R-Car Gen2 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)	/* R-Car Gen3 only */
#define IMTTBCR_SL0_LVL_1		(1 << 4)	/* R-Car Gen2 only */

#define IMBUSCR				0x000c		/* R-Car Gen2 only */
#define IMBUSCR_DVM			(1 << 2)	/* R-Car Gen2 only */
#define IMBUSCR_BUSSEL_MASK		(3 << 0)	/* R-Car Gen2 only */

#define IMTTLBR0			0x0010		/* R-Car Gen2/3 */
#define IMTTUBR0			0x0014		/* R-Car Gen2/3 */

#define IMSTR				0x0020		/* R-Car Gen2/3 */
#define IMSTR_MHIT			(1 << 4)	/* R-Car Gen2/3 */
#define IMSTR_ABORT			(1 << 2)	/* R-Car Gen2/3 */
#define IMSTR_PF			(1 << 1)	/* R-Car Gen2/3 */
#define IMSTR_TF			(1 << 0)	/* R-Car Gen2/3 */

#define IMMAIR0				0x0028		/* R-Car Gen2/3 */

#define IMELAR				0x0030		/* R-Car Gen2/3, IMEAR on R-Car Gen2 */
#define IMEUAR				0x0034		/* R-Car Gen3 only */

/* uTLB registers */
#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)			(0x0300 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))	/* R-Car Gen3 only */
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)	/* R-Car Gen2/3 */
#define IMUCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMUCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)			(0x0308 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))	/* R-Car Gen3 only */

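/*
 * Example of how the split uTLB banks resolve: IMUCTR(5) is IMUCTR0(5) =
 * 0x300 + 5 * 16 = 0x350, while IMUCTR(33) is IMUCTR32(33) = 0x600 +
 * (33 - 32) * 16 = 0x610. On R-Car Gen4 these offsets are additionally
 * shifted by the per-SoC utlb_offset_base (0x3000), see ipmmu_utlb_reg()
 * below.
 */
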
/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

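/*
 * Context registers are banked per context. On R-Car Gen2/3 the stride is
 * 0x40 and at most 8 contexts are used, so the "context_id > 7" adjustment
 * below only ever triggers on R-Car Gen4, where contexts 8-15 appear to live
 * in a second register block: e.g. with ctx_offset_base 0x10000 and stride
 * 0x1040, IMCTR for context 8 resolves to 0x10000 + 0x800 - 0x200 +
 * 8 * 0x1040 = 0x18800.
 */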
static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
				  unsigned int context_id, unsigned int reg)
{
	unsigned int base = mmu->features->ctx_offset_base;

	if (context_id > 7)
		base += 0x800 - 8 * 0x40;

	return base + context_id * mmu->features->ctx_offset_stride + reg;
}

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
			  unsigned int context_id, unsigned int reg)
{
	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
			    unsigned int context_id, unsigned int reg, u32 data)
{
	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
{
	return mmu->features->utlb_offset_base + reg;
}

static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
				unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
}

static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
			       unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	u32 val;

	if (read_poll_timeout_atomic(ipmmu_ctx_read_root, val,
				     !(val & IMCTR_FLUSH), 1, TLB_LOOP_TIMEOUT,
				     false, domain, IMCTR))
		dev_err_ratelimited(domain->mmu->dev,
				    "TLB sync timed out -- MMU may be deadlocked\n");
}

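/*
 * The hardware clears IMCTR_FLUSH once the invalidation has completed, which
 * is the condition the poll in ipmmu_tlb_sync() waits for.
 */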
static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_imuasid_write(mmu, utlb, 0);
	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
				      IMUCTR_FLUSH | IMUCTR_MMUEN);
	mmu->utlb_ctx[utlb] = domain->context_id;
}

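/*
 * For example, attaching a master whose fwspec ID is 5 to context 2 writes
 * IMUCTR(5) = IMUCTR_TTSEL_MMU(2) | IMUCTR_FLUSH | IMUCTR_MMUEN, i.e.
 * (2 << 4) | (1 << 1) | (1 << 0) = 0x23, routing that microTLB's
 * translations through context 2's page table.
 */
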
/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_imuctr_write(mmu, utlb, 0);
	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_flush(unsigned long iova, size_t size,
			    size_t granule, void *cookie)
{
	ipmmu_tlb_flush_all(cookie);
}

static const struct iommu_flush_ops ipmmu_flush_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_flush_walk = ipmmu_tlb_flush,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else
		ret = -EBUSY;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

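/*
 * For reference, the IMTTBCR value programmed below works out to
 * IMTTBCR_EAE | IMTTBCR_SL0_TWOBIT_LVL_1 = 0x80000080 on R-Car Gen3/4
 * (twobit_imttbcr_sl0 set, no cache snooping), and to IMTTBCR_EAE |
 * IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | IMTTBCR_IRGN0_WB_WA |
 * IMTTBCR_SL0_LVL_1 = 0x80003510 on R-Car Gen2.
 */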
static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors and allocate the whole 32-bit VA space to
	 * TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	if (domain->mmu->features->cache_snoop)
		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
		       IMTTBCR_IRGN0_WB_WA;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_flush_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.coherent_walk = false;
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	ipmmu_domain_setup_context(domain);
	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	if (!domain->mmu)
		return;

	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	unsigned long iova;
	u32 status;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMELAR);
	if (IS_ENABLED(CONFIG_64BIT))
		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
			    status, iova);

	return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *ipmmu_domain_alloc_paging(struct device *dev)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	mutex_init(&domain->mutex);

	return &domain->io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static int ipmmu_iommu_identity_attach(struct iommu_domain *identity_domain,
				       struct device *dev)
{
	struct iommu_domain *io_domain = iommu_get_domain_for_dev(dev);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_domain *domain;
	unsigned int i;

	if (io_domain == identity_domain || !io_domain)
		return 0;

	domain = to_vmsa_domain(io_domain);
	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
	return 0;
}

static struct iommu_domain_ops ipmmu_iommu_identity_ops = {
	.attach_dev = ipmmu_iommu_identity_attach,
};

static struct iommu_domain ipmmu_iommu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &ipmmu_iommu_identity_ops,
};

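/*
 * Note that "identity" here simply means untranslated: detaching the uTLBs
 * clears IMUCTR_MMUEN, so the master's DMA bypasses the IPMMU rather than
 * going through an identity-mapped page table.
 */
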
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t pgsize, size_t pgcount,
		     int prot, gfp_t gfp, size_t *mapped)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->map_pages(domain->iop, iova, paddr, pgsize, pgcount,
				      prot, gfp, mapped);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t pgsize, size_t pgcount,
			  struct iommu_iotlb_gather *gather)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap_pages(domain->iop, iova, pgsize, pgcount, gather);
}

static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
			     struct iommu_iotlb_gather *gather)
{
	ipmmu_flush_iotlb_all(io_domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));

	return 0;
}

static const struct soc_device_attribute soc_needs_opt_in[] = {
	{ .family = "R-Car Gen3", },
	{ .family = "R-Car Gen4", },
	{ .family = "RZ/G2", },
	{ /* sentinel */ }
};

static const struct soc_device_attribute soc_denylist[] = {
	{ .soc_id = "r8a774a1", },
	{ .soc_id = "r8a7795", .revision = "ES2.*" },
	{ .soc_id = "r8a7796", },
	{ /* sentinel */ }
};

static const char * const devices_allowlist[] = {
	"ee100000.mmc",
	"ee120000.mmc",
	"ee140000.mmc",
	"ee160000.mmc"
};

static bool ipmmu_device_is_allowed(struct device *dev)
{
	unsigned int i;

	/*
	 * R-Car Gen3/4 and RZ/G2 use the allow list to opt in devices.
	 * For other SoCs, this always returns true.
	 */
	if (!soc_device_match(soc_needs_opt_in))
		return true;

	/* Check whether this SoC can use the IPMMU correctly or not */
	if (soc_device_match(soc_denylist))
		return false;

	/* Check whether this device is a PCI device */
	if (dev_is_pci(dev))
		return true;

	/* Check whether this device can work with the IPMMU */
	for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) {
		if (!strcmp(dev_name(dev), devices_allowlist[i]))
			return true;
	}

	/* Otherwise, do not allow use of IPMMU */
	return false;
}

static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	if (!ipmmu_device_is_allowed(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() will be called multiple times */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}

static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	int ret;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static struct iommu_device *ipmmu_probe_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

	/*
	 * Only let through devices that have been verified in xlate()
	 */
	if (!mmu)
		return ERR_PTR(-ENODEV);

	return &mmu->iommu;
}

static void ipmmu_probe_finalize(struct device *dev)
{
	int ret = 0;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
		ret = ipmmu_init_arm_mapping(dev);

	if (ret)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void ipmmu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		unsigned int utlb = fwspec->ids[i];

		ipmmu_imuctr_write(mmu, utlb, 0);
		mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
	}

	arm_iommu_release_mapping(mmu->mapping);
}

static const struct iommu_ops ipmmu_ops = {
	.identity_domain = &ipmmu_iommu_identity_domain,
	.domain_alloc_paging = ipmmu_domain_alloc_paging,
	.probe_device = ipmmu_probe_device,
	.release_device = ipmmu_release_device,
	.probe_finalize = ipmmu_probe_finalize,
	/*
	 * FIXME: The device grouping is a fixed property of the hardware's
	 * ability to isolate and control DMA, it should not depend on kconfig.
	 */
	.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
			? generic_device_group : generic_single_device_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = ipmmu_attach_device,
		.map_pages = ipmmu_map,
		.unmap_pages = ipmmu_unmap,
		.flush_iotlb_all = ipmmu_flush_iotlb_all,
		.iotlb_sync = ipmmu_iotlb_sync,
		.iova_to_phys = ipmmu_iova_to_phys,
		.free = ipmmu_domain_free,
	}
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_ctx_write(mmu, i, IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.num_utlbs = 32,
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
	.cache_snoop = true,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.num_utlbs = 48,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen4 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 16,
	.num_utlbs = 64,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0x10000,
	.ctx_offset_stride = 0x1040,
	.utlb_offset_base = 0x3000,
};

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a774a1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774b1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774c0",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774e1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77961",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77980",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77990",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a779a0",
		.data = &ipmmu_features_rcar_gen4,
	}, {
		.compatible = "renesas,rcar-gen4-ipmmu-vmsa",
		.data = &ipmmu_features_rcar_gen4,
	}, {
		/* Terminator */
	},
};

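/*
 * A device tree consumer references an IPMMU instance with a single cell
 * holding the uTLB (micro-TLB) index, which ipmmu_of_xlate() records as the
 * device's fwspec ID. An illustrative (hypothetical) example:
 *
 *	&avb {
 *		iommus = <&ipmmu_ds0 16>;
 *	};
 */
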
static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret)
		return ret;

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_property_present(pdev->dev.of_node, "renesas,ipmmu-main"))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Wait until the root device has been registered for sure.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
		if (ret)
			return ret;
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static void ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);
}

#ifdef CONFIG_PM_SLEEP
static int ipmmu_resume_noirq(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	unsigned int i;

	/* Reset root MMU and restore contexts */
	if (ipmmu_is_root(mmu)) {
		ipmmu_device_reset(mmu);

		for (i = 0; i < mmu->num_ctx; i++) {
			if (!mmu->domains[i])
				continue;

			ipmmu_domain_setup_context(mmu->domains[i]);
		}
	}

	/* Re-enable active micro-TLBs */
	for (i = 0; i < mmu->features->num_utlbs; i++) {
		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
			continue;

		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
	}

	return 0;
}

static const struct dev_pm_ops ipmmu_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
#define DEV_PM_OPS	&ipmmu_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
		.pm = DEV_PM_OPS,
	},
	.probe = ipmmu_probe,
	.remove_new = ipmmu_remove,
};
builtin_platform_driver(ipmmu_driver);