/*
 * IOMMU API for GART in Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * Author: Hiroshi DOYU <hdoyu@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include <soc/tegra/mc.h>

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(SZ_4K)

#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID	(1 << 31)
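
/*
 * Note (illustrative, not from the original source): the GART page table is
 * not memory-mapped directly. Software selects a PTE by writing its IOVA to
 * GART_ENTRY_ADDR and then reads or writes that PTE through GART_ENTRY_DATA,
 * as done by gart_set_pte() and gart_read_pte() below.
 */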

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK						\
	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)

struct gart_client {
	struct device		*dev;
	struct list_head	list;
};

struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	u32			page_count;	/* total remappable size */
	dma_addr_t		iovmm_base;	/* offset to vmm_area */
	spinlock_t		pte_lock;	/* for pagetable */
	struct list_head	client;
	spinlock_t		client_lock;	/* for client list */
	struct device		*dev;

	struct iommu_device	iommu;		/* IOMMU Core handle */
};

struct gart_domain {
	struct iommu_domain domain;	/* generic domain handle */
	struct gart_device *gart;	/* link to gart device */
};

static struct gart_device *gart_handle; /* unique for a system */

static bool gart_debug;

#define GART_PTE(_pfn)						\
	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
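
/*
 * Worked example (added for illustration): with PAGE_SHIFT == GART_PAGE_SHIFT
 * == 12 (asserted in tegra_gart_probe()), GART_PAGE_MASK evaluates to
 * 0x7ffff000, and GART_PTE(0x12345) yields 0x80000000 | (0x12345 << 12) ==
 * 0x92345000, i.e. the page frame number in bits 30:12 plus the valid bit
 * (bit 31).
 */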

static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct gart_domain, domain);
}

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	((void)readl((gart)->regs + GART_CONFIG))
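
/*
 * Typical use of the read-back (a sketch mirroring do_gart_setup() below):
 * a posted write to a GART register is followed by a dummy read so that the
 * write is known to have reached the hardware before execution continues:
 *
 *	writel(1, gart->regs + GART_CONFIG);
 *	FLUSH_GART_REGS(gart);
 */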

#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
	     iova += GART_PAGE_SIZE)
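
/*
 * For illustration (hypothetical values, not from the original source): with
 * iovmm_base == 0x58000000 and page_count == 0x2000, the iterator visits
 * 0x58000000, 0x58001000, ... up to and including 0x59fff000, i.e. one PTE
 * per 4 KiB page of a 32 MiB aperture.
 */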

static inline void gart_set_pte(struct gart_device *gart,
				unsigned long offs, u32 pte)
{
	writel(offs, gart->regs + GART_ENTRY_ADDR);
	writel(pte, gart->regs + GART_ENTRY_DATA);

	dev_dbg(gart->dev, "%s %08lx:%08x\n",
		pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long offs)
{
	unsigned long pte;

	writel(offs, gart->regs + GART_ENTRY_ADDR);
	pte = readl(gart->regs + GART_ENTRY_DATA);

	return pte;
}

static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
	unsigned long iova;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova) {
		unsigned long pte;

		pte = gart_read_pte(gart, iova);

		dev_dbg(gart->dev, "%s %08lx:%08lx\n",
			(GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
			iova, pte & GART_PAGE_MASK);
	}
	spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif

static inline bool gart_iova_range_valid(struct gart_device *gart,
					 unsigned long iova, size_t bytes)
{
	unsigned long iova_start, iova_end, gart_start, gart_end;

	iova_start = iova;
	iova_end = iova_start + bytes - 1;
	gart_start = gart->iovmm_base;
	gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

	if (iova_start < gart_start)
		return false;
	if (iova_end > gart_end)
		return false;
	return true;
}

static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *client, *c;
	int err = 0;

	client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;

	spin_lock(&gart->client_lock);
	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			dev_err(gart->dev,
				"%s is already attached\n", dev_name(dev));
			err = -EINVAL;
			goto fail;
		}
	}
	list_add(&client->list, &gart->client);
	spin_unlock(&gart->client_lock);
	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
	return 0;
fail:
	devm_kfree(gart->dev, client);
	spin_unlock(&gart->client_lock);
	return err;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *c;

	spin_lock(&gart->client_lock);
	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			list_del(&c->list);
			devm_kfree(gart->dev, c);
			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
			goto out;
		}
	}
	dev_err(gart->dev, "Couldn't find\n");
out:
	spin_unlock(&gart->client_lock);
}

static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct gart_domain *gart_domain;
	struct gart_device *gart;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	gart = gart_handle;
	if (!gart)
		return NULL;

	gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
	if (!gart_domain)
		return NULL;

	gart_domain->gart = gart;
	gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
	gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
					gart->page_count * GART_PAGE_SIZE - 1;
	gart_domain->domain.geometry.force_aperture = true;

	return &gart_domain->domain;
}

static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;

	if (gart) {
		spin_lock(&gart->client_lock);
		if (!list_empty(&gart->client)) {
			struct gart_client *c;

			list_for_each_entry(c, &gart->client, list)
				gart_iommu_detach_dev(domain, c->dev);
		}
		spin_unlock(&gart->client_lock);
	}

	kfree(gart_domain);
}

static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;
	unsigned long pfn;
	unsigned long pte;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pfn = __phys_to_pfn(pa);
	if (!pfn_valid(pfn)) {
		dev_err(gart->dev, "Invalid page: %pa\n", &pa);
		spin_unlock_irqrestore(&gart->pte_lock, flags);
		return -EINVAL;
	}
	if (gart_debug) {
		pte = gart_read_pte(gart, iova);
		if (pte & GART_ENTRY_PHYS_ADDR_VALID) {
			spin_unlock_irqrestore(&gart->pte_lock, flags);
			dev_err(gart->dev, "Page entry is in-use\n");
			return -EBUSY;
		}
	}
	gart_set_pte(gart, iova, GART_PTE(pfn));
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return 0;

	spin_lock_irqsave(&gart->pte_lock, flags);
	gart_set_pte(gart, iova, 0);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return bytes;
}

static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long pte;
	phys_addr_t pa;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, 0))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pte = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	pa = (pte & GART_PAGE_MASK);
	if (!pfn_valid(__phys_to_pfn(pa))) {
		dev_err(gart->dev, "No entry for %08llx:%pa\n",
			(unsigned long long)iova, &pa);
		gart_dump_table(gart);
		return -EINVAL;
	}
	return pa;
}

static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static int gart_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!dev->iommu_fwspec)
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	iommu_device_link(&gart_handle->iommu, dev);

	return 0;
}

static void gart_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
	iommu_device_unlink(&gart_handle->iommu, dev);
}

static int gart_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	return 0;
}

static void gart_iommu_sync(struct iommu_domain *domain)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;

	FLUSH_GART_REGS(gart);
}

static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_alloc	= gart_iommu_domain_alloc,
	.domain_free	= gart_iommu_domain_free,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.add_device	= gart_iommu_add_device,
	.remove_device	= gart_iommu_remove_device,
	.device_group	= generic_device_group,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
	.of_xlate	= gart_iommu_of_xlate,
	.iotlb_sync_map	= gart_iommu_sync,
	.iotlb_sync	= gart_iommu_sync,
};
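
/*
 * Example usage (a sketch, not part of this driver): client drivers reach
 * the GART through the generic IOMMU API rather than calling these ops
 * directly, e.g.:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */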

int tegra_gart_suspend(struct gart_device *gart)
{
	unsigned long iova;
	u32 *data = gart->savedata;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	return 0;
}

int tegra_gart_resume(struct gart_device *gart)
{
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	do_gart_setup(gart, gart->savedata);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	return 0;
}

struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
	struct gart_device *gart;
	struct resource *res_remap;
	void __iomem *gart_regs;
	int ret;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res_remap = platform_get_resource(to_platform_device(dev),
					  IORESOURCE_MEM, 1);
	if (!res_remap) {
		dev_err(dev, "GART memory aperture expected\n");
		return ERR_PTR(-ENXIO);
	}

	gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
	if (!gart) {
		dev_err(dev, "failed to allocate gart_device\n");
		return ERR_PTR(-ENOMEM);
	}

	ret = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
	if (ret) {
		dev_err(dev, "Failed to register IOMMU in sysfs\n");
		return ERR_PTR(ret);
	}

	iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
	iommu_device_set_fwnode(&gart->iommu, dev->fwnode);

	ret = iommu_device_register(&gart->iommu);
	if (ret) {
		dev_err(dev, "Failed to register IOMMU\n");
		goto remove_sysfs;
	}

	gart->dev = dev;
	gart_regs = mc->regs + GART_REG_BASE;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->client_lock);
	INIT_LIST_HEAD(&gart->client);
	gart->regs = gart_regs;
	gart->iovmm_base = (dma_addr_t)res_remap->start;
	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

	gart->savedata = vmalloc(array_size(sizeof(u32), gart->page_count));
	if (!gart->savedata) {
		dev_err(dev, "failed to allocate context save area\n");
		ret = -ENOMEM;
		goto unregister_iommu;
	}

	do_gart_setup(gart, NULL);
	gart_handle = gart;

	return gart;

unregister_iommu:
	iommu_device_unregister(&gart->iommu);
remove_sysfs:
	iommu_device_sysfs_remove(&gart->iommu);

	return ERR_PTR(ret);
}

module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");