iommu/tegra-smmu: Unwrap tegra_smmu_group_get
[linux-block.git] / drivers / iommu / tegra-smmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu_group {
	struct list_head list;
	struct tegra_smmu *smmu;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
	unsigned int swgroup;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	spinlock_t lock;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
				       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PAGE_MASK (~(SMMU_SIZE_PT-1))
#define SMMU_OFFSET_IN_PAGE(x) ((unsigned long)(x) & ~SMMU_PAGE_MASK)
#define SMMU_PFN_PHYS(x) ((phys_addr_t)(x) << SMMU_PTE_SHIFT)
#define SMMU_PHYS_PFN(x) ((unsigned long)((x) >> SMMU_PTE_SHIFT))

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
		       SMMU_PDE_NONSECURE)

static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

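/*
 * Read back an arbitrary SMMU register so that all preceding register
 * writes are guaranteed to have been posted to the hardware.
 */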
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_PTB_ASID);
}

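/*
 * Allocate the lowest free ASID from the SMMU's bitmap. Returns -ENOSPC
 * when all ASIDs supported by the SoC are already in use.
 */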
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

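/*
 * Allocate an address space: a zeroed page directory page allocated with
 * __GFP_DMA, plus per-PDE use counts and page table pointers. Only
 * unmanaged domains are supported.
 */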
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	spin_lock_init(&as->lock);

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	WARN_ON_ONCE(as->use_count);
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

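/*
 * Point the swgroup's ASID register at @asid and enable translation for
 * every memory client that belongs to the swgroup.
 */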
static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	} else {
		pr_warn("%s group from swgroup %u not found\n", __func__,
			swgroup);
		/* No point moving ahead if group was not found */
		return;
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

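/*
 * First-use setup for an address space: map the page directory for device
 * access, check that its DMA address is representable in a page table
 * pointer, allocate an ASID and program SMMU_PTB_ASID/SMMU_PTB_DATA.
 * Subsequent attaches only increment the use count.
 */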
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma))
		return -ENOMEM;

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;
}

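/*
 * Attach walks the device's "iommus" phandles and enables every swgroup
 * that points back at this SMMU. A purely illustrative (hypothetical)
 * device tree fragment that this parsing would consume:
 *
 *	gpu@57000000 {
 *		...
 *		iommus = <&mc TEGRA_SWGROUP_GPU>;
 *	};
 *
 * where the single specifier cell (per #iommu-cells = <1>) is the swgroup
 * ID handed to tegra_smmu_enable().
 */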
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}

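/*
 * Write a page directory entry and flush it out of the CPU cache, the
 * page table cache and the TLB section that covers @iova.
 */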
static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

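/*
 * Return a pointer to the PTE slot for @iova, installing a new page table
 * for the covering PDE if none exists yet. The caller provides the page to
 * use for a new table; the page table's DMA address is returned through
 * @dmap.
 */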
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap, struct page *page)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		dma_addr_t dma;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							 SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}

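/*
 * Write a single PTE, sync the 4-byte entry to the device and flush the
 * corresponding page table cache line and TLB group entry.
 */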
static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

static struct page *as_get_pde_page(struct tegra_smmu_as *as,
				    unsigned long iova, gfp_t gfp,
				    unsigned long *flags)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/* at first check whether allocation needs to be done at all */
	if (page)
		return page;

	/*
	 * In order to prevent exhaustion of the atomic memory pool, we
	 * allocate page in a sleeping context if GFP flags permit. Hence
	 * spinlock needs to be unlocked and re-locked after allocation.
	 */
	if (!(gfp & __GFP_ATOMIC))
		spin_unlock_irqrestore(&as->lock, *flags);

	page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);

	if (!(gfp & __GFP_ATOMIC))
		spin_lock_irqsave(&as->lock, *flags);

	/*
	 * In a case of blocking allocation, a concurrent mapping may win
	 * the PDE allocation. In this case the allocated page isn't needed
	 * if allocation succeeded and the allocation failure isn't fatal.
	 */
	if (as->pts[pde]) {
		if (page)
			__free_page(page);

		page = as->pts[pde];
	}

	return page;
}

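/*
 * Map or unmap a single SMMU page. Both helpers are called with as->lock
 * held; __tegra_smmu_map() may temporarily drop it inside as_get_pde_page()
 * when a sleeping allocation is allowed.
 */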
static int
__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
		 phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
		 unsigned long *flags)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	struct page *page;
	u32 pte_attrs;
	u32 *pte;

	page = as_get_pde_page(as, iova, gfp, flags);
	if (!page)
		return -ENOMEM;

	pte = as_get_pte(as, iova, &pte_dma, page);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	pte_attrs = SMMU_PTE_NONSECURE;

	if (prot & IOMMU_READ)
		pte_attrs |= SMMU_PTE_READABLE;

	if (prot & IOMMU_WRITE)
		pte_attrs |= SMMU_PTE_WRITABLE;

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   SMMU_PHYS_PFN(paddr) | pte_attrs);

	return 0;
}

static size_t
__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
		   size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&as->lock, flags);
	ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
	spin_unlock_irqrestore(&as->lock, flags);

	return ret;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);
	size = __tegra_smmu_unmap(domain, iova, size, gather);
	spin_unlock_irqrestore(&as->lock, flags);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}

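/*
 * Resolve a memory controller device node referenced from an "iommus"
 * phandle to its tegra_smmu instance, or NULL if the MC driver has not
 * (yet) registered one.
 */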
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

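/*
 * Initialize the device's IOMMU fwspec and run ->of_xlate() so that the
 * swgroup ID from the "iommus" specifier is recorded in the fwspec.
 */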
static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
				struct of_phandle_args *args)
{
	const struct iommu_ops *ops = smmu->iommu.ops;
	int err;

	err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
	if (err < 0) {
		dev_err(dev, "failed to initialize fwspec: %d\n", err);
		return err;
	}

	err = ops->of_xlate(dev, args);
	if (err < 0) {
		dev_err(dev, "failed to parse SW group ID: %d\n", err);
		iommu_fwspec_free(dev);
		return err;
	}

	return 0;
}

static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = NULL;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			err = tegra_smmu_configure(smmu, dev, &args);
			of_node_put(args.np);

			if (err < 0)
				return ERR_PTR(err);

			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev_iommu_priv_set(dev, smmu);

			break;
		}

		of_node_put(args.np);
		index++;
	}

	if (!smmu)
		return ERR_PTR(-ENODEV);

	return &smmu->iommu;
}

static void tegra_smmu_release_device(struct device *dev)
{
	dev_iommu_priv_set(dev, NULL);
}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
	unsigned int i, j;

	for (i = 0; i < smmu->soc->num_groups; i++)
		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
			if (smmu->soc->groups[i].swgroups[j] == swgroup)
				return &smmu->soc->groups[i];

	return NULL;
}

static void tegra_smmu_group_release(void *iommu_data)
{
	struct tegra_smmu_group *group = iommu_data;
	struct tegra_smmu *smmu = group->smmu;

	mutex_lock(&smmu->lock);
	list_del(&group->list);
	mutex_unlock(&smmu->lock);
}

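/*
 * Look up (or create) the iommu_group for the device's swgroup. Devices
 * whose swgroups belong to the same SoC-defined group share one
 * iommu_group; the lookup is done inline here rather than through a
 * separate tegra_smmu_group_get() helper (see the commit subject above).
 */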
static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	const struct tegra_smmu_group_soc *soc;
	unsigned int swgroup = fwspec->ids[0];
	struct tegra_smmu_group *group;
	struct iommu_group *grp;

	/* Find group_soc associating with swgroup */
	soc = tegra_smmu_find_group(smmu, swgroup);

	mutex_lock(&smmu->lock);

	/* Find existing iommu_group associating with swgroup or group_soc */
	list_for_each_entry(group, &smmu->groups, list)
		if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
			grp = iommu_group_ref_get(group->group);
			mutex_unlock(&smmu->lock);
			return grp;
		}

	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
	if (!group) {
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	INIT_LIST_HEAD(&group->list);
	group->swgroup = swgroup;
	group->smmu = smmu;
	group->soc = soc;

	group->group = iommu_group_alloc();
	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
	if (soc)
		iommu_group_set_name(group->group, soc->name);
	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);

	return group->group;
}

static int tegra_smmu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	u32 id = args->args[0];

	return iommu_fwspec_add_ids(dev, &id, 1);
}

static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.probe_device = tegra_smmu_probe_device,
	.release_device = tegra_smmu_release_device,
	.device_group = tegra_smmu_device_group,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.iova_to_phys = tegra_smmu_iova_to_phys,
	.of_xlate = tegra_smmu_of_xlate,
	.pgsize_bitmap = SZ_4K,
};

static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

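/* debugfs: dump per-swgroup and per-client SMMU enable state */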
static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup enabled ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s %-7s %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}

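/*
 * Called by the Tegra memory controller driver: configure the PTC and TLB,
 * enable the SMMU, then register the instance with the IOMMU core and the
 * platform bus.
 */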
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .probe_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&smmu->groups);
	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask =
		BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0) {
		iommu_device_unregister(&smmu->iommu);
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}