/*
 * IOMMU API for GART in Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)	"%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/of.h>

#include <asm/cacheflush.h>

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(SZ_4K)

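/*
 * Register offsets below are expressed relative to GART_REG_BASE (0x24),
 * i.e. relative to the start of the ioremapped GART register window, so
 * GART_CONFIG resolves to offset 0 within gart->regs.
 */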
#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID	(1 << 31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK						\
	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)

struct gart_client {
	struct device		*dev;
	struct list_head	list;
};

struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	u32			page_count;	/* total remappable size */
	dma_addr_t		iovmm_base;	/* offset to vmm_area */
	spinlock_t		pte_lock;	/* for pagetable */
	struct list_head	client;
	spinlock_t		client_lock;	/* for client list */
	struct device		*dev;

	struct iommu_device	iommu;		/* IOMMU Core handle */
};

struct gart_domain {
	struct iommu_domain	domain;		/* generic domain handle */
	struct gart_device	*gart;		/* link to gart device */
};

static struct gart_device *gart_handle; /* unique for a system */

static bool gart_debug;

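/*
 * Each GART PTE is a single 32-bit word: bit 31 is the "valid" flag and
 * the remaining bits hold the page-aligned physical address, which is
 * why GART_PAGE_MASK clears both the page-offset bits and the valid bit.
 */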
#define GART_PTE(_pfn)						\
	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))

static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct gart_domain, domain);
}

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	((void)readl((gart)->regs + GART_CONFIG))

#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
	     iova += GART_PAGE_SIZE)

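/*
 * PTEs are accessed indirectly: the target IOVA is first written to
 * GART_ENTRY_ADDR, then the PTE value is read from or written to
 * GART_ENTRY_DATA. The two-register sequence is not atomic, so racing
 * callers serialize it with gart->pte_lock.
 */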
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long offs, u32 pte)
{
	writel(offs, gart->regs + GART_ENTRY_ADDR);
	writel(pte, gart->regs + GART_ENTRY_DATA);

	dev_dbg(gart->dev, "%s %08lx:%08x\n",
		pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long offs)
{
	unsigned long pte;

	writel(offs, gart->regs + GART_ENTRY_ADDR);
	pte = readl(gart->regs + GART_ENTRY_DATA);

	return pte;
}

static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	/* writing 1 to GART_CONFIG enables address translation */
	writel(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
	unsigned long iova;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova) {
		unsigned long pte;

		pte = gart_read_pte(gart, iova);

		dev_dbg(gart->dev, "%s %08lx:%08lx\n",
			(GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
			iova, pte & GART_PAGE_MASK);
	}
	spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif

static inline bool gart_iova_range_valid(struct gart_device *gart,
					 unsigned long iova, size_t bytes)
{
	unsigned long iova_start, iova_end, gart_start, gart_end;

	iova_start = iova;
	iova_end = iova_start + bytes - 1;
	gart_start = gart->iovmm_base;
	gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

	if (iova_start < gart_start)
		return false;
	if (iova_end > gart_end)
		return false;
	return true;
}

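/*
 * There is a single GART (and thus a single pagetable) in the system,
 * so every domain ultimately maps onto the same hardware aperture;
 * attach/detach only track which devices use it via gart->client.
 */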
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *client, *c;
	int err = 0;

	client = devm_kzalloc(gart->dev, sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;

	spin_lock(&gart->client_lock);
	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			dev_err(gart->dev,
				"%s is already attached\n", dev_name(dev));
			err = -EINVAL;
			goto fail;
		}
	}
	list_add(&client->list, &gart->client);
	spin_unlock(&gart->client_lock);
	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
	return 0;

fail:
	devm_kfree(gart->dev, client);
	spin_unlock(&gart->client_lock);
	return err;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *c;

	spin_lock(&gart->client_lock);

	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			list_del(&c->list);
			devm_kfree(gart->dev, c);
			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
			goto out;
		}
	}
	dev_err(gart->dev, "Couldn't find %s to detach\n", dev_name(dev));
out:
	spin_unlock(&gart->client_lock);
}

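/*
 * Only unmanaged domains are supported; the domain geometry is fixed to
 * the single GART aperture taken from the device's second memory
 * resource at probe time.
 */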
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct gart_domain *gart_domain;
	struct gart_device *gart;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	gart = gart_handle;
	if (!gart)
		return NULL;

	gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
	if (!gart_domain)
		return NULL;

	gart_domain->gart = gart;
	gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
	gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
					gart->page_count * GART_PAGE_SIZE - 1;
	gart_domain->domain.geometry.force_aperture = true;

	return &gart_domain->domain;
}

static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;

	if (gart) {
		struct gart_client *c, *tmp;

		/*
		 * Detach any remaining clients inline: calling
		 * gart_iommu_detach_dev() here would recursively take
		 * client_lock, and it would also delete list entries
		 * under a non-_safe iteration.
		 */
		spin_lock(&gart->client_lock);
		list_for_each_entry_safe(c, tmp, &gart->client, list) {
			dev_dbg(gart->dev, "Detached %s\n", dev_name(c->dev));
			list_del(&c->list);
			devm_kfree(gart->dev, c);
		}
		spin_unlock(&gart->client_lock);
	}

	kfree(gart_domain);
}

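/*
 * GART_IOMMU_PGSIZES advertises only 4KiB pages, so the IOMMU core
 * splits larger requests and map/unmap below are invoked one
 * GART_PAGE_SIZE-sized page at a time.
 */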
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;
	unsigned long pfn;
	unsigned long pte;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pfn = __phys_to_pfn(pa);
	if (!pfn_valid(pfn)) {
		dev_err(gart->dev, "Invalid page: %pa\n", &pa);
		spin_unlock_irqrestore(&gart->pte_lock, flags);
		return -EINVAL;
	}
	if (gart_debug) {
		pte = gart_read_pte(gart, iova);
		if (pte & GART_ENTRY_PHYS_ADDR_VALID) {
			spin_unlock_irqrestore(&gart->pte_lock, flags);
			dev_err(gart->dev, "Page entry is in-use\n");
			return -EBUSY;
		}
	}
	gart_set_pte(gart, iova, GART_PTE(pfn));
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return 0;

	spin_lock_irqsave(&gart->pte_lock, flags);
	gart_set_pte(gart, iova, 0);
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return bytes;
}

static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long pte;
	phys_addr_t pa;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, 0))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pte = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	pa = (pte & GART_PAGE_MASK);
	if (!pfn_valid(__phys_to_pfn(pa))) {
		dev_err(gart->dev, "No entry for %08llx:%pa\n",
			(unsigned long long)iova, &pa);
		gart_dump_table(gart);
		return -EINVAL;
	}
	return pa;
}

static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static int gart_iommu_add_device(struct device *dev)
{
	struct iommu_group *group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	iommu_device_link(&gart_handle->iommu, dev);

	return 0;
}

static void gart_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
	iommu_device_unlink(&gart_handle->iommu, dev);
}

static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_alloc	= gart_iommu_domain_alloc,
	.domain_free	= gart_iommu_domain_free,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.add_device	= gart_iommu_add_device,
	.remove_device	= gart_iommu_remove_device,
	.device_group	= generic_device_group,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
};

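/*
 * The pagetable lives only in hardware registers, so suspend snapshots
 * every PTE into gart->savedata and resume rewrites the whole table
 * from that copy.
 */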
static int tegra_gart_suspend(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long iova;
	u32 *data = gart->savedata;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static int tegra_gart_resume(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	do_gart_setup(gart, gart->savedata);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static int tegra_gart_probe(struct platform_device *pdev)
{
	struct gart_device *gart;
	struct resource *res, *res_remap;
	void __iomem *gart_regs;
	struct device *dev = &pdev->dev;
	int ret;

	if (gart_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res || !res_remap) {
		dev_err(dev, "GART memory aperture expected\n");
		return -ENXIO;
	}

	gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
	if (!gart) {
		dev_err(dev, "failed to allocate gart_device\n");
		return -ENOMEM;
	}

	gart_regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!gart_regs) {
		dev_err(dev, "failed to remap GART registers\n");
		return -ENXIO;
	}

	ret = iommu_device_sysfs_add(&gart->iommu, &pdev->dev, NULL,
				     dev_name(&pdev->dev));
	if (ret) {
		dev_err(dev, "Failed to register IOMMU in sysfs\n");
		return ret;
	}

	iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);

	ret = iommu_device_register(&gart->iommu);
	if (ret) {
		dev_err(dev, "Failed to register IOMMU\n");
		iommu_device_sysfs_remove(&gart->iommu);
		return ret;
	}

	gart->dev = &pdev->dev;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->client_lock);
	INIT_LIST_HEAD(&gart->client);
	gart->regs = gart_regs;
	gart->iovmm_base = (dma_addr_t)res_remap->start;
	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

	gart->savedata = vmalloc(array_size(sizeof(u32), gart->page_count));
	if (!gart->savedata) {
		dev_err(dev, "failed to allocate context save area\n");
		/* don't leak the IOMMU registration on this error path */
		iommu_device_unregister(&gart->iommu);
		iommu_device_sysfs_remove(&gart->iommu);
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, gart);
	do_gart_setup(gart, NULL);

	gart_handle = gart;

	return 0;
}

static int tegra_gart_remove(struct platform_device *pdev)
{
	struct gart_device *gart = platform_get_drvdata(pdev);

	iommu_device_unregister(&gart->iommu);
	iommu_device_sysfs_remove(&gart->iommu);

	writel(0, gart->regs + GART_CONFIG);
	vfree(gart->savedata);
	gart_handle = NULL;
	return 0;
}

static const struct dev_pm_ops tegra_gart_pm_ops = {
	.suspend	= tegra_gart_suspend,
	.resume		= tegra_gart_resume,
};

static const struct of_device_id tegra_gart_of_match[] = {
	{ .compatible = "nvidia,tegra20-gart", },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_gart_of_match);

static struct platform_driver tegra_gart_driver = {
	.probe		= tegra_gart_probe,
	.remove		= tegra_gart_remove,
	.driver = {
		.name	= "tegra-gart",
		.pm	= &tegra_gart_pm_ops,
		.of_match_table = tegra_gart_of_match,
	},
};

static int tegra_gart_init(void)
{
	return platform_driver_register(&tegra_gart_driver);
}

static void __exit tegra_gart_exit(void)
{
	platform_driver_unregister(&tegra_gart_driver);
}

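/*
 * Registered at subsys_initcall level rather than as a regular module
 * init, so the IOMMU is available before its client drivers probe.
 */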
subsys_initcall(tegra_gart_init);
module_exit(tegra_gart_exit);
module_param(gart_debug, bool, 0644);

MODULE_PARM_DESC(gart_debug, "Enable GART debugging");
MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-gart");
MODULE_LICENSE("GPL v2");