iommu/tegra-smmu: Do not use PAGE_SHIFT and PAGE_MASK
drivers/iommu/tegra-smmu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu_group {
	struct list_head list;
	struct tegra_smmu *smmu;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	spinlock_t lock;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

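/*
 * As implied by the address masks above, a "section" TLB flush covers one
 * 4 MiB region (IOVA bits [31:22], i.e. one PDE), while a "group" flush
 * covers a 16 KiB aligned range (IOVA bits [31:14]).
 */
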
#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PAGE_MASK		(~(SMMU_SIZE_PT-1))
#define SMMU_OFFSET_IN_PAGE(x)	((unsigned long)(x) & ~SMMU_PAGE_MASK)
#define SMMU_PFN_PHYS(x)	((phys_addr_t)(x) << SMMU_PTE_SHIFT)
#define SMMU_PHYS_PFN(x)	((unsigned long)((x) >> SMMU_PTE_SHIFT))

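/*
 * The SMMU uses a two-level page table with 4 KiB pages: IOVA bits [31:22]
 * index the 1024-entry page directory, bits [21:12] index a 1024-entry page
 * table and bits [11:0] are the offset within the page, so each PDE covers
 * a 4 MiB "section" of IOVA space.
 */
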
#define SMMU_PD_READABLE	(1 << 31)
#define SMMU_PD_WRITABLE	(1 << 30)
#define SMMU_PD_NONSECURE	(1 << 29)

#define SMMU_PDE_READABLE	(1 << 31)
#define SMMU_PDE_WRITABLE	(1 << 30)
#define SMMU_PDE_NONSECURE	(1 << 29)
#define SMMU_PDE_NEXT		(1 << 28)

#define SMMU_PTE_READABLE	(1 << 31)
#define SMMU_PTE_WRITABLE	(1 << 30)
#define SMMU_PTE_NONSECURE	(1 << 29)

#define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
				 SMMU_PDE_NONSECURE)

static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

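/*
 * Reading back an SMMU register acts as a barrier: it ensures that all
 * preceding (posted) register writes have reached the hardware before the
 * caller continues.
 */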
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_PTB_ASID);
}

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	spin_lock_init(&as->lock);

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	WARN_ON_ONCE(as->use_count);
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	} else {
		pr_warn("%s group from swgroup %u not found\n", __func__,
			swgroup);
		/* No point moving ahead if group was not found */
		return;
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

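/*
 * Make an address space usable by the hardware: map its page directory for
 * DMA, allocate an ASID and program the page table base (SMMU_PTB_*) for
 * that ASID. The use count lets several devices share the same AS.
 */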
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma))
		return -ENOMEM;

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;
}

static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}

static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap, struct page *page)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		dma_addr_t dma;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							  SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

static struct page *as_get_pde_page(struct tegra_smmu_as *as,
				    unsigned long iova, gfp_t gfp,
				    unsigned long *flags)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/* at first check whether allocation needs to be done at all */
	if (page)
		return page;

	/*
	 * In order to prevent exhaustion of the atomic memory pool, we
	 * allocate page in a sleeping context if GFP flags permit. Hence
	 * spinlock needs to be unlocked and re-locked after allocation.
	 */
	if (!(gfp & __GFP_ATOMIC))
		spin_unlock_irqrestore(&as->lock, *flags);

	page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);

	if (!(gfp & __GFP_ATOMIC))
		spin_lock_irqsave(&as->lock, *flags);

	/*
	 * In a case of blocking allocation, a concurrent mapping may win
	 * the PDE allocation. In this case the allocated page isn't needed
	 * if allocation succeeded and the allocation failure isn't fatal.
	 */
	if (as->pts[pde]) {
		if (page)
			__free_page(page);

		page = as->pts[pde];
	}

	return page;
}

static int
__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
		 phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
		 unsigned long *flags)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	struct page *page;
	u32 pte_attrs;
	u32 *pte;

	page = as_get_pde_page(as, iova, gfp, flags);
	if (!page)
		return -ENOMEM;

	pte = as_get_pte(as, iova, &pte_dma, page);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	pte_attrs = SMMU_PTE_NONSECURE;

	if (prot & IOMMU_READ)
		pte_attrs |= SMMU_PTE_READABLE;

	if (prot & IOMMU_WRITE)
		pte_attrs |= SMMU_PTE_WRITABLE;

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   SMMU_PHYS_PFN(paddr) | pte_attrs);

	return 0;
}

static size_t
__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
		   size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&as->lock, flags);
	ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
	spin_unlock_irqrestore(&as->lock, flags);

	return ret;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);
	size = __tegra_smmu_unmap(domain, iova, size, gather);
	spin_unlock_irqrestore(&as->lock, flags);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return SMMU_PFN_PHYS(pfn);
}

static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
				struct of_phandle_args *args)
{
	const struct iommu_ops *ops = smmu->iommu.ops;
	int err;

	err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
	if (err < 0) {
		dev_err(dev, "failed to initialize fwspec: %d\n", err);
		return err;
	}

	err = ops->of_xlate(dev, args);
	if (err < 0) {
		dev_err(dev, "failed to parse SW group ID: %d\n", err);
		iommu_fwspec_free(dev);
		return err;
	}

	return 0;
}

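/*
 * A client device references the SMMU through an "iommus" phandle on the
 * memory controller, with the SW group ID as the single specifier cell. An
 * illustrative device tree fragment (not taken from this file):
 *
 *	iommus = <&mc TEGRA_SWGROUP_DC>;
 *
 * tegra_smmu_probe_device() below walks these phandles to find the SMMU
 * instance a device is attached to.
 */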
static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = NULL;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			err = tegra_smmu_configure(smmu, dev, &args);
			of_node_put(args.np);

			if (err < 0)
				return ERR_PTR(err);

			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev_iommu_priv_set(dev, smmu);

			break;
		}

		of_node_put(args.np);
		index++;
	}

	if (!smmu)
		return ERR_PTR(-ENODEV);

	return &smmu->iommu;
}

static void tegra_smmu_release_device(struct device *dev)
{
	dev_iommu_priv_set(dev, NULL);
}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
	unsigned int i, j;

	for (i = 0; i < smmu->soc->num_groups; i++)
		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
			if (smmu->soc->groups[i].swgroups[j] == swgroup)
				return &smmu->soc->groups[i];

	return NULL;
}

static void tegra_smmu_group_release(void *iommu_data)
{
	struct tegra_smmu_group *group = iommu_data;
	struct tegra_smmu *smmu = group->smmu;

	mutex_lock(&smmu->lock);
	list_del(&group->list);
	mutex_unlock(&smmu->lock);
}

static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
						unsigned int swgroup)
{
	const struct tegra_smmu_group_soc *soc;
	struct tegra_smmu_group *group;
	struct iommu_group *grp;

	soc = tegra_smmu_find_group(smmu, swgroup);
	if (!soc)
		return NULL;

	mutex_lock(&smmu->lock);

	list_for_each_entry(group, &smmu->groups, list)
		if (group->soc == soc) {
			grp = iommu_group_ref_get(group->group);
			mutex_unlock(&smmu->lock);
			return grp;
		}

	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
	if (!group) {
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	INIT_LIST_HEAD(&group->list);
	group->smmu = smmu;
	group->soc = soc;

	group->group = iommu_group_alloc();
	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
	iommu_group_set_name(group->group, soc->name);
	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);

	return group->group;
}

static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	struct iommu_group *group;

	group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
	if (!group)
		group = generic_device_group(dev);

	return group;
}

static int tegra_smmu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	u32 id = args->args[0];

	return iommu_fwspec_add_ids(dev, &id, 1);
}

static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.probe_device = tegra_smmu_probe_device,
	.release_device = tegra_smmu_release_device,
	.device_group = tegra_smmu_device_group,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.iova_to_phys = tegra_smmu_iova_to_phys,
	.of_xlate = tegra_smmu_of_xlate,
	.pgsize_bitmap = SZ_4K,
};

static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}

struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .probe_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&smmu->groups);
	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

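	/*
	 * For example (illustrative numbers, not from this file): with 34
	 * address bits and SMMU_PTE_SHIFT == 12, pfn_mask becomes
	 * BIT_MASK(22) - 1 == 0x3fffff, i.e. PTEs hold a 22-bit PFN.
	 */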
	smmu->pfn_mask =
		BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0) {
		iommu_device_unregister(&smmu->iommu);
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}