iommu/tegra: gart: Integrate with Memory Controller driver

drivers/iommu/tegra-gart.c
/*
 * IOMMU API for GART in Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * Author: Hiroshi DOYU <hdoyu@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include <soc/tegra/mc.h>

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES      (SZ_4K)

#define GART_REG_BASE           0x24
#define GART_CONFIG             (0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR         (0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA         (0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID      (1 << 31)

#define GART_PAGE_SHIFT         12
#define GART_PAGE_SIZE          (1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK                                          \
        (~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)

struct gart_client {
        struct device           *dev;
        struct list_head        list;
};

struct gart_device {
        void __iomem            *regs;
        u32                     *savedata;
        u32                     page_count;     /* total remappable size, in pages */
        dma_addr_t              iovmm_base;     /* base of the IOVA aperture */
        spinlock_t              pte_lock;       /* for pagetable */
        struct list_head        client;
        spinlock_t              client_lock;    /* for client list */
        struct device           *dev;

        struct iommu_device     iommu;          /* IOMMU core handle */
};

struct gart_domain {
        struct iommu_domain domain;             /* generic domain handle */
        struct gart_device *gart;               /* link to gart device   */
};

static struct gart_device *gart_handle; /* unique for a system */

static bool gart_debug;

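/*
 * Build a GART PTE: the valid flag in bit 31 plus the page frame number
 * shifted back up to a physical address. Using PAGE_SHIFT here is safe
 * because the probe path asserts PAGE_SHIFT == GART_PAGE_SHIFT.
 */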
#define GART_PTE(_pfn)                                          \
        (GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))

static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct gart_domain, domain);
}

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must be followed by a read-back to ensure the APB/AHB bus transaction
 * is complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)   ((void)readl((gart)->regs + GART_CONFIG))

#define for_each_gart_pte(gart, iova)                                   \
        for (iova = gart->iovmm_base;                                   \
             iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
             iova += GART_PAGE_SIZE)

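/*
 * The page table is accessed through an indirect register pair: the
 * IOVA is written to GART_ENTRY_ADDR, then the corresponding PTE is
 * accessed through GART_ENTRY_DATA. Callers serialize these paired
 * accesses with pte_lock.
 */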
static inline void gart_set_pte(struct gart_device *gart,
                                unsigned long offs, u32 pte)
{
        writel(offs, gart->regs + GART_ENTRY_ADDR);
        writel(pte, gart->regs + GART_ENTRY_DATA);

        dev_dbg(gart->dev, "%s %08lx:%08x\n",
                 pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
                                          unsigned long offs)
{
        unsigned long pte;

        writel(offs, gart->regs + GART_ENTRY_ADDR);
        pte = readl(gart->regs + GART_ENTRY_DATA);

        return pte;
}

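/*
 * Program every PTE in the aperture, either from a saved snapshot or
 * with zeroes, then enable address translation and flush the writes
 * out to the hardware.
 */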
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
        unsigned long iova;

        for_each_gart_pte(gart, iova)
                gart_set_pte(gart, iova, data ? *(data++) : 0);

        writel(1, gart->regs + GART_CONFIG);
        FLUSH_GART_REGS(gart);
}

#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
        unsigned long iova;
        unsigned long flags;

        spin_lock_irqsave(&gart->pte_lock, flags);
        for_each_gart_pte(gart, iova) {
                unsigned long pte;

                pte = gart_read_pte(gart, iova);

                dev_dbg(gart->dev, "%s %08lx:%08lx\n",
                        (GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
                        iova, pte & GART_PAGE_MASK);
        }
        spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif

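/* Check that [iova, iova + bytes) lies entirely within the aperture. */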
static inline bool gart_iova_range_valid(struct gart_device *gart,
                                         unsigned long iova, size_t bytes)
{
        unsigned long iova_start, iova_end, gart_start, gart_end;

        iova_start = iova;
        iova_end = iova_start + bytes - 1;
        gart_start = gart->iovmm_base;
        gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

        if (iova_start < gart_start)
                return false;
        if (iova_end > gart_end)
                return false;
        return true;
}

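/*
 * Attach a device to the GART domain: record it in the client list,
 * refusing a second attachment of the same device.
 */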
static int gart_iommu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct gart_domain *gart_domain = to_gart_domain(domain);
        struct gart_device *gart = gart_domain->gart;
        struct gart_client *client, *c;
        int err = 0;

        client = devm_kzalloc(gart->dev, sizeof(*client), GFP_KERNEL);
        if (!client)
                return -ENOMEM;
        client->dev = dev;

        spin_lock(&gart->client_lock);
        list_for_each_entry(c, &gart->client, list) {
                if (c->dev == dev) {
                        dev_err(gart->dev,
                                "%s is already attached\n", dev_name(dev));
                        err = -EINVAL;
                        goto fail;
                }
        }
        list_add(&client->list, &gart->client);
        spin_unlock(&gart->client_lock);
        dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
        return 0;

fail:
        devm_kfree(gart->dev, client);
        spin_unlock(&gart->client_lock);
        return err;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
                                  struct device *dev)
{
        struct gart_domain *gart_domain = to_gart_domain(domain);
        struct gart_device *gart = gart_domain->gart;
        struct gart_client *c;

        spin_lock(&gart->client_lock);

        list_for_each_entry(c, &gart->client, list) {
                if (c->dev == dev) {
                        list_del(&c->list);
                        devm_kfree(gart->dev, c);
                        dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
                        goto out;
                }
        }
        dev_err(gart->dev, "Couldn't find %s to detach\n", dev_name(dev));
out:
        spin_unlock(&gart->client_lock);
}

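/*
 * Only unmanaged domains are supported, and there is a single GART per
 * system, so every allocated domain is backed by the same gart_handle.
 */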
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
        struct gart_domain *gart_domain;
        struct gart_device *gart;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        gart = gart_handle;
        if (!gart)
                return NULL;

        gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
        if (!gart_domain)
                return NULL;

        gart_domain->gart = gart;
        gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
        gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
                                        gart->page_count * GART_PAGE_SIZE - 1;
        gart_domain->domain.geometry.force_aperture = true;

        return &gart_domain->domain;
}

static void gart_iommu_domain_free(struct iommu_domain *domain)
{
        struct gart_domain *gart_domain = to_gart_domain(domain);
        struct gart_device *gart = gart_domain->gart;

        if (gart) {
                struct gart_client *c, *tmp;

                /*
                 * Drop any remaining clients directly rather than through
                 * gart_iommu_detach_dev(), which would try to re-acquire
                 * client_lock.
                 */
                spin_lock(&gart->client_lock);
                list_for_each_entry_safe(c, tmp, &gart->client, list) {
                        list_del(&c->list);
                        devm_kfree(gart->dev, c);
                }
                spin_unlock(&gart->client_lock);
        }

        kfree(gart_domain);
}

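/*
 * Map one 4 KiB page. With the gart_debug module parameter set, mapping
 * over an already-valid PTE fails with -EBUSY instead of silently
 * overwriting it.
 */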
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t pa, size_t bytes, int prot)
{
        struct gart_domain *gart_domain = to_gart_domain(domain);
        struct gart_device *gart = gart_domain->gart;
        unsigned long flags;
        unsigned long pfn;
        unsigned long pte;

        if (!gart_iova_range_valid(gart, iova, bytes))
                return -EINVAL;

        spin_lock_irqsave(&gart->pte_lock, flags);
        pfn = __phys_to_pfn(pa);
        if (!pfn_valid(pfn)) {
                dev_err(gart->dev, "Invalid page: %pa\n", &pa);
                spin_unlock_irqrestore(&gart->pte_lock, flags);
                return -EINVAL;
        }
        if (gart_debug) {
                pte = gart_read_pte(gart, iova);
                if (pte & GART_ENTRY_PHYS_ADDR_VALID) {
                        spin_unlock_irqrestore(&gart->pte_lock, flags);
                        dev_err(gart->dev, "Page entry is in-use\n");
                        return -EBUSY;
                }
        }
        gart_set_pte(gart, iova, GART_PTE(pfn));
        spin_unlock_irqrestore(&gart->pte_lock, flags);
        return 0;
}

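/* Unmap one 4 KiB page by clearing its PTE, valid bit included. */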
static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t bytes)
{
        struct gart_domain *gart_domain = to_gart_domain(domain);
        struct gart_device *gart = gart_domain->gart;
        unsigned long flags;

        if (!gart_iova_range_valid(gart, iova, bytes))
                return 0;

        spin_lock_irqsave(&gart->pte_lock, flags);
        gart_set_pte(gart, iova, 0);
        spin_unlock_irqrestore(&gart->pte_lock, flags);
        return bytes;
}

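/* Translate an IOVA back to a physical address by reading its PTE. */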
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        struct gart_domain *gart_domain = to_gart_domain(domain);
        struct gart_device *gart = gart_domain->gart;
        unsigned long pte;
        phys_addr_t pa;
        unsigned long flags;

        if (!gart_iova_range_valid(gart, iova, 0))
                return -EINVAL;

        spin_lock_irqsave(&gart->pte_lock, flags);
        pte = gart_read_pte(gart, iova);
        spin_unlock_irqrestore(&gart->pte_lock, flags);

        pa = (pte & GART_PAGE_MASK);
        if (!pfn_valid(__phys_to_pfn(pa))) {
                dev_err(gart->dev, "No entry for %08llx:%pa\n",
                         (unsigned long long)iova, &pa);
                gart_dump_table(gart);
                return -EINVAL;
        }
        return pa;
}

static bool gart_iommu_capable(enum iommu_cap cap)
{
        return false;
}

static int gart_iommu_add_device(struct device *dev)
{
        struct iommu_group *group;

        if (!dev->iommu_fwspec)
                return -ENODEV;

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);

        iommu_device_link(&gart_handle->iommu, dev);

        return 0;
}

static void gart_iommu_remove_device(struct device *dev)
{
        iommu_group_remove_device(dev);
        iommu_device_unlink(&gart_handle->iommu, dev);
}

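/*
 * There is no per-master configuration to parse from the device tree,
 * so the phandle translation is accepted unconditionally.
 */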
static int gart_iommu_of_xlate(struct device *dev,
                               struct of_phandle_args *args)
{
        return 0;
}

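/*
 * PTE writes go straight to the hardware, so syncing only needs the
 * usual PPSB read-back to make sure they have landed.
 */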
static void gart_iommu_sync(struct iommu_domain *domain)
{
        struct gart_domain *gart_domain = to_gart_domain(domain);
        struct gart_device *gart = gart_domain->gart;

        FLUSH_GART_REGS(gart);
}

static const struct iommu_ops gart_iommu_ops = {
        .capable        = gart_iommu_capable,
        .domain_alloc   = gart_iommu_domain_alloc,
        .domain_free    = gart_iommu_domain_free,
        .attach_dev     = gart_iommu_attach_dev,
        .detach_dev     = gart_iommu_detach_dev,
        .add_device     = gart_iommu_add_device,
        .remove_device  = gart_iommu_remove_device,
        .device_group   = generic_device_group,
        .map            = gart_iommu_map,
        .unmap          = gart_iommu_unmap,
        .iova_to_phys   = gart_iommu_iova_to_phys,
        .pgsize_bitmap  = GART_IOMMU_PGSIZES,
        .of_xlate       = gart_iommu_of_xlate,
        .iotlb_sync_map = gart_iommu_sync,
        .iotlb_sync     = gart_iommu_sync,
};

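/*
 * Snapshot the live page table into savedata so that tegra_gart_resume()
 * can replay it after the register contents are lost across suspend.
 */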
int tegra_gart_suspend(struct gart_device *gart)
{
        unsigned long iova;
        u32 *data = gart->savedata;
        unsigned long flags;

        spin_lock_irqsave(&gart->pte_lock, flags);
        for_each_gart_pte(gart, iova)
                *(data++) = gart_read_pte(gart, iova);
        spin_unlock_irqrestore(&gart->pte_lock, flags);
        return 0;
}

int tegra_gart_resume(struct gart_device *gart)
{
        unsigned long flags;

        spin_lock_irqsave(&gart->pte_lock, flags);
        do_gart_setup(gart, gart->savedata);
        spin_unlock_irqrestore(&gart->pte_lock, flags);
        return 0;
}

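/*
 * The GART register window lives inside the Memory Controller's
 * aperture (mc->regs + GART_REG_BASE); only the remappable IOVA
 * aperture comes from this device's own second memory resource.
 */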
struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
        struct gart_device *gart;
        struct resource *res_remap;
        void __iomem *gart_regs;
        int ret;

        BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

        /* the GART memory aperture is required */
        res_remap = platform_get_resource(to_platform_device(dev),
                                          IORESOURCE_MEM, 1);
        if (!res_remap) {
                dev_err(dev, "GART memory aperture expected\n");
                return ERR_PTR(-ENXIO);
        }

        gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
        if (!gart) {
                dev_err(dev, "failed to allocate gart_device\n");
                return ERR_PTR(-ENOMEM);
        }

        ret = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
        if (ret) {
                dev_err(dev, "Failed to register IOMMU in sysfs\n");
                return ERR_PTR(ret);
        }

        iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
        iommu_device_set_fwnode(&gart->iommu, dev->fwnode);

        ret = iommu_device_register(&gart->iommu);
        if (ret) {
                dev_err(dev, "Failed to register IOMMU\n");
                goto remove_sysfs;
        }

        gart->dev = dev;
        gart_regs = mc->regs + GART_REG_BASE;
        spin_lock_init(&gart->pte_lock);
        spin_lock_init(&gart->client_lock);
        INIT_LIST_HEAD(&gart->client);
        gart->regs = gart_regs;
        gart->iovmm_base = (dma_addr_t)res_remap->start;
        gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

        gart->savedata = vmalloc(array_size(sizeof(u32), gart->page_count));
        if (!gart->savedata) {
                dev_err(dev, "failed to allocate context save area\n");
                ret = -ENOMEM;
                goto unregister_iommu;
        }

        do_gart_setup(gart, NULL);

        gart_handle = gart;

        return gart;

unregister_iommu:
        iommu_device_unregister(&gart->iommu);
remove_sysfs:
        iommu_device_sysfs_remove(&gart->iommu);

        return ERR_PTR(ret);
}

module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");
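
/*
 * For context, a minimal sketch of how the Memory Controller driver
 * might wire up the tegra_gart_probe/suspend/resume entry points after
 * this change. The mc->gart member, the helper names and the config
 * guard below are illustrative assumptions, not the actual MC driver.
 */
#ifdef CONFIG_TEGRA_IOMMU_GART
static int tegra_mc_gart_setup(struct tegra_mc *mc, struct device *dev)
{
        /* Hand the shared register window over to the GART driver. */
        mc->gart = tegra_gart_probe(dev, mc);
        if (IS_ERR(mc->gart))
                return PTR_ERR(mc->gart);

        return 0;
}

static int __maybe_unused tegra_mc_suspend(struct device *dev)
{
        struct tegra_mc *mc = dev_get_drvdata(dev);

        /* Save the GART page table before register state is lost. */
        return tegra_gart_suspend(mc->gart);
}

static int __maybe_unused tegra_mc_resume(struct device *dev)
{
        struct tegra_mc *mc = dev_get_drvdata(dev);

        /* Replay the saved page table and re-enable translation. */
        return tegra_gart_resume(mc->gart);
}
#endif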