/*
 * IOMMU API for GART in Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * Author: Hiroshi DOYU <hdoyu@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#define dev_fmt(fmt)	"gart: " fmt
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include <soc/tegra/mc.h>
/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(SZ_4K)

#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID	(1 << 31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK						\
	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)
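/*
 * Per-instance GART state: the MMIO registers, the remappable aperture,
 * a shadow copy of the page table kept for suspend/resume, and the
 * bookkeeping for the single domain that may be active at a time.
 */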
struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	u32			page_count;	/* total remappable size */
	dma_addr_t		iovmm_base;	/* offset to vmm_area */
	spinlock_t		pte_lock;	/* for pagetable */
	spinlock_t		dom_lock;	/* for active domain */
	unsigned int		active_devices;	/* number of active devices */
	struct iommu_domain	*active_domain;	/* current active domain */
	struct device		*dev;

	struct iommu_device	iommu;		/* IOMMU Core handle */
};
static struct gart_device *gart_handle; /* unique for a system */

static bool gart_debug;

#define GART_PTE(_pfn)						\
	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	((void)readl((gart)->regs + GART_CONFIG))
#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
	     iova += GART_PAGE_SIZE)
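/*
 * Program a single PTE through the indirect GART_ENTRY_ADDR/DATA
 * register pair.
 */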
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long offs, u32 pte)
{
	writel(offs, gart->regs + GART_ENTRY_ADDR);
	writel(pte, gart->regs + GART_ENTRY_DATA);

	dev_dbg(gart->dev, "%s %08lx:%08x\n",
		pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}
static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long offs)
{
	unsigned long pte;

	writel(offs, gart->regs + GART_ENTRY_ADDR);
	pte = readl(gart->regs + GART_ENTRY_DATA);

	return pte;
}
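/*
 * Program the whole aperture: restore a saved page table if one is
 * given, otherwise clear every entry, then enable translation.
 */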
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}
#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
	unsigned long iova;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova) {
		unsigned long pte;

		pte = gart_read_pte(gart, iova);

		dev_dbg(gart->dev, "%s %08lx:%08lx\n",
			(GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
			iova, pte & GART_PAGE_MASK);
	}
	spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif
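/* check that [iova, iova + bytes) lies entirely inside the GART aperture */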
static inline bool gart_iova_range_valid(struct gart_device *gart,
					 unsigned long iova, size_t bytes)
{
	unsigned long iova_start, iova_end, gart_start, gart_end;

	iova_start = iova;
	iova_end = iova_start + bytes - 1;
	gart_start = gart->iovmm_base;
	gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

	if (iova_start < gart_start)
		return false;
	if (iova_end > gart_end)
		return false;

	return true;
}
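/*
 * The GART provides a single translation context, so only one domain can
 * be active at a time; additional devices may attach only to that domain.
 */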
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_device *gart = gart_handle;
	int ret = 0;

	spin_lock(&gart->dom_lock);

	if (gart->active_domain && gart->active_domain != domain) {
		ret = -EBUSY;
	} else if (dev->archdata.iommu != domain) {
		dev->archdata.iommu = domain;
		gart->active_domain = domain;
		gart->active_devices++;
	}

	spin_unlock(&gart->dom_lock);

	return ret;
}
static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_device *gart = gart_handle;

	spin_lock(&gart->dom_lock);

	if (dev->archdata.iommu == domain) {
		dev->archdata.iommu = NULL;

		if (--gart->active_devices == 0)
			gart->active_domain = NULL;
	}

	spin_unlock(&gart->dom_lock);
}
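/*
 * Only unmanaged domains are supported; the domain geometry advertises
 * the fixed GART aperture.
 */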
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct gart_device *gart = gart_handle;
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (domain) {
		domain->geometry.aperture_start = gart->iovmm_base;
		domain->geometry.aperture_end = gart->iovmm_base +
					gart->page_count * GART_PAGE_SIZE - 1;
		domain->geometry.force_aperture = true;
	}

	return domain;
}
static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	WARN_ON(gart_handle->active_domain == domain);
	kfree(domain);
}
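/*
 * Map one GART page. When gart_debug is set, an attempt to overwrite a
 * PTE that is already valid is rejected with -EBUSY.
 */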
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct gart_device *gart = gart_handle;
	unsigned long flags;
	unsigned long pfn;
	unsigned long pte;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pfn = __phys_to_pfn(pa);
	if (!pfn_valid(pfn)) {
		dev_err(gart->dev, "Invalid page: %pa\n", &pa);
		spin_unlock_irqrestore(&gart->pte_lock, flags);
		return -EINVAL;
	}
	if (gart_debug) {
		pte = gart_read_pte(gart, iova);
		if (pte & GART_ENTRY_PHYS_ADDR_VALID) {
			spin_unlock_irqrestore(&gart->pte_lock, flags);
			dev_err(gart->dev, "Page entry is in-use\n");
			return -EBUSY;
		}
	}
	gart_set_pte(gart, iova, GART_PTE(pfn));
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	return 0;
}
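/* unmapping simply clears the PTE of the single GART page at @iova */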
static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct gart_device *gart = gart_handle;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return 0;

	spin_lock_irqsave(&gart->pte_lock, flags);
	gart_set_pte(gart, iova, 0);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	return bytes;
}
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_device *gart = gart_handle;
	unsigned long pte;
	phys_addr_t pa;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, 0))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pte = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	pa = (pte & GART_PAGE_MASK);
	if (!pfn_valid(__phys_to_pfn(pa))) {
		dev_err(gart->dev, "No entry for %08llx:%pa\n",
			(unsigned long long)iova, &pa);
		gart_dump_table(gart);
		return -EINVAL;
	}

	return pa;
}
static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}
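/*
 * Only devices that reference the GART in their firmware description
 * (i.e. have an iommu_fwspec) are accepted.
 */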
static int gart_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!dev->iommu_fwspec)
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	iommu_device_link(&gart_handle->iommu, dev);

	return 0;
}
static void gart_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
	iommu_device_unlink(&gart_handle->iommu, dev);
}
static int gart_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	return 0;
}
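/* a register read-back ensures all queued PTE writes have reached the GART */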
static void gart_iommu_sync(struct iommu_domain *domain)
{
	struct gart_device *gart = gart_handle;

	FLUSH_GART_REGS(gart);
}
static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_alloc	= gart_iommu_domain_alloc,
	.domain_free	= gart_iommu_domain_free,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.add_device	= gart_iommu_add_device,
	.remove_device	= gart_iommu_remove_device,
	.device_group	= generic_device_group,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
	.of_xlate	= gart_iommu_of_xlate,
	.iotlb_sync_map	= gart_iommu_sync,
	.iotlb_sync	= gart_iommu_sync,
};
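/*
 * Snapshot every PTE into gart->savedata so that the table can be
 * restored by tegra_gart_resume().
 */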
int tegra_gart_suspend(struct gart_device *gart)
{
	unsigned long iova;
	u32 *data = gart->savedata;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	return 0;
}
int tegra_gart_resume(struct gart_device *gart)
{
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	do_gart_setup(gart, gart->savedata);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	return 0;
}
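/*
 * The GART registers live inside the memory controller's register space;
 * the remappable aperture is taken from the MC's second memory resource.
 */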
struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
	struct gart_device *gart;
	struct resource *res_remap;
	void __iomem *gart_regs;
	int ret;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res_remap = platform_get_resource(to_platform_device(dev),
					  IORESOURCE_MEM, 1);
	if (!res_remap) {
		dev_err(dev, "GART memory aperture expected\n");
		return ERR_PTR(-ENXIO);
	}

	gart = kzalloc(sizeof(*gart), GFP_KERNEL);
	if (!gart) {
		dev_err(dev, "failed to allocate gart_device\n");
		return ERR_PTR(-ENOMEM);
	}

	ret = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
	if (ret) {
		dev_err(dev, "Failed to register IOMMU in sysfs\n");
		goto free_gart;
	}

	iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
	iommu_device_set_fwnode(&gart->iommu, dev->fwnode);

	ret = iommu_device_register(&gart->iommu);
	if (ret) {
		dev_err(dev, "Failed to register IOMMU\n");
		goto remove_sysfs;
	}

	gart->dev = dev;
	gart_regs = mc->regs + GART_REG_BASE;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->dom_lock);
	gart->regs = gart_regs;
	gart->iovmm_base = (dma_addr_t)res_remap->start;
	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

	gart->savedata = vmalloc(array_size(sizeof(u32), gart->page_count));
	if (!gart->savedata) {
		dev_err(dev, "failed to allocate context save area\n");
		ret = -ENOMEM;
		goto unregister_iommu;
	}

	do_gart_setup(gart, NULL);

	gart_handle = gart;

	return gart;

unregister_iommu:
	iommu_device_unregister(&gart->iommu);
remove_sysfs:
	iommu_device_sysfs_remove(&gart->iommu);
free_gart:
	kfree(gart);

	return ERR_PTR(ret);
}
module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");