Commit | Line | Data |
---|---|---|
a8c21a54 T |
1 | /* |
2 | * Copyright (C) 2015 Etnaviv Project | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms of the GNU General Public License version 2 as published by | |
6 | * the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License along with | |
14 | * this program. If not, see <http://www.gnu.org/licenses/>. | |
15 | */ | |
16 | ||
#include <linux/component.h>
#include <linux/of_platform.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
25 | ||
#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
/*
 * "reglog" module parameter (mode 0600): when register logging support is
 * compiled in, enables dumping of every MMIO read/write.  Without the
 * config option it is a compile-time constant 0, so the logging branches
 * below are optimised away entirely.
 */
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif
33 | ||
/*
 * Map a MEM resource of @pdev into kernel space: the resource named @name,
 * or the first MEM resource when @name is NULL.
 *
 * Returns the mapped address, or an ERR_PTR() value propagated from
 * devm_ioremap_resource() on failure.  The mapping is devm-managed, so
 * callers never unmap it explicitly.  @dbgname is only used for the
 * optional reglog diagnostic line.
 */
void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* devm_ioremap_resource() also validates a NULL res for us. */
	ptr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ptr)) {
		dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
			PTR_ERR(ptr));
		return ptr;
	}

	if (reglog)
		dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
			   dbgname, ptr, (size_t)resource_size(res));

	return ptr;
}
58 | ||
/*
 * Write @data to the MMIO register at @addr, logging the access first when
 * the reglog module parameter is set.
 */
void etnaviv_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);

	writel(data, addr);
}
66 | ||
67 | u32 etnaviv_readl(const void __iomem *addr) | |
68 | { | |
69 | u32 val = readl(addr); | |
70 | ||
71 | if (reglog) | |
72 | printk(KERN_DEBUG "IO:R %p %08x\n", addr, val); | |
73 | ||
74 | return val; | |
75 | } | |
76 | ||
77 | /* | |
78 | * DRM operations: | |
79 | */ | |
80 | ||
81 | ||
/*
 * Initialise the hardware of every GPU core that was bound to this DRM
 * device.  A core whose etnaviv_gpu_init() fails is removed from the pipe
 * array, so subsequent ioctls treat that pipe as absent (-ENXIO) instead
 * of touching half-initialised hardware.
 */
static void load_gpu(struct drm_device *dev)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *g = priv->gpu[i];

		if (g) {
			int ret;

			ret = etnaviv_gpu_init(g);
			if (ret) {
				dev_err(g->dev, "hw init failed: %d\n", ret);
				priv->gpu[i] = NULL;
			}
		}
	}
}
101 | ||
102 | static int etnaviv_open(struct drm_device *dev, struct drm_file *file) | |
103 | { | |
104 | struct etnaviv_file_private *ctx; | |
105 | ||
106 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | |
107 | if (!ctx) | |
108 | return -ENOMEM; | |
109 | ||
110 | file->driver_priv = ctx; | |
111 | ||
112 | return 0; | |
113 | } | |
114 | ||
/*
 * Per-file teardown: drop any lastctx reference to this file's context on
 * every GPU (under that GPU's lock, so it cannot race with submission),
 * then free the context allocated in etnaviv_open().
 */
static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx = file->driver_priv;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];

		if (gpu) {
			mutex_lock(&gpu->lock);
			if (gpu->lastctx == ctx)
				gpu->lastctx = NULL;
			mutex_unlock(&gpu->lock);
		}
	}

	kfree(ctx);
}
134 | ||
135 | /* | |
136 | * DRM debugfs: | |
137 | */ | |
138 | ||
139 | #ifdef CONFIG_DEBUG_FS | |
/* debugfs "gem": describe all GEM objects of this device. */
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	etnaviv_gem_describe_objects(priv, m);

	return 0;
}
148 | ||
/*
 * debugfs "mm": dump the drm_mm manager backing the mmap offset space,
 * under the vma offset manager's read lock.
 */
static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
	int ret;

	read_lock(&dev->vma_offset_manager->vm_lock);
	ret = drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
	read_unlock(&dev->vma_offset_manager->vm_lock);

	return ret;
}
159 | ||
/*
 * debugfs "mmu" (per GPU): dump the drm_mm manager of the GPU's MMU
 * address space, under the MMU lock.
 */
static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	mutex_lock(&gpu->mmu->lock);
	drm_mm_dump_table(m, &gpu->mmu->mm);
	mutex_unlock(&gpu->mmu->lock);

	return 0;
}
170 | ||
/*
 * Hexdump the GPU command ring buffer, four 32-bit words per line, each
 * line prefixed with the words' kernel virtual address.  Called with
 * gpu->lock held (see etnaviv_ring_show()).
 */
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct etnaviv_cmdbuf *buf = gpu->buffer;
	u32 size = buf->size;
	u32 *ptr = buf->vaddr;
	u32 i;

	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
			buf->vaddr, (u64)buf->paddr, size - buf->user_size);

	for (i = 0; i < size / 4; i++) {
		/* Line break after every four words, except before the first. */
		if (i && !(i % 4))
			seq_puts(m, "\n");
		if (i % 4 == 0)
			seq_printf(m, "\t0x%p: ", ptr + i);
		seq_printf(m, "%08x ", *(ptr + i));
	}
	seq_puts(m, "\n");
}
190 | ||
/* debugfs "ring" (per GPU): dump the ring buffer under the GPU lock. */
static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}
201 | ||
202 | static int show_unlocked(struct seq_file *m, void *arg) | |
203 | { | |
204 | struct drm_info_node *node = (struct drm_info_node *) m->private; | |
205 | struct drm_device *dev = node->minor->dev; | |
206 | int (*show)(struct drm_device *dev, struct seq_file *m) = | |
207 | node->info_ent->data; | |
208 | ||
209 | return show(dev, m); | |
210 | } | |
211 | ||
212 | static int show_each_gpu(struct seq_file *m, void *arg) | |
213 | { | |
214 | struct drm_info_node *node = (struct drm_info_node *) m->private; | |
215 | struct drm_device *dev = node->minor->dev; | |
216 | struct etnaviv_drm_private *priv = dev->dev_private; | |
217 | struct etnaviv_gpu *gpu; | |
218 | int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) = | |
219 | node->info_ent->data; | |
220 | unsigned int i; | |
221 | int ret = 0; | |
222 | ||
223 | for (i = 0; i < ETNA_MAX_PIPES; i++) { | |
224 | gpu = priv->gpu[i]; | |
225 | if (!gpu) | |
226 | continue; | |
227 | ||
228 | ret = show(gpu, m); | |
229 | if (ret < 0) | |
230 | break; | |
231 | } | |
232 | ||
233 | return ret; | |
234 | } | |
235 | ||
236 | static struct drm_info_list etnaviv_debugfs_list[] = { | |
237 | {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs}, | |
238 | {"gem", show_unlocked, 0, etnaviv_gem_show}, | |
239 | { "mm", show_unlocked, 0, etnaviv_mm_show }, | |
240 | {"mmu", show_each_gpu, 0, etnaviv_mmu_show}, | |
241 | {"ring", show_each_gpu, 0, etnaviv_ring_show}, | |
242 | }; | |
243 | ||
244 | static int etnaviv_debugfs_init(struct drm_minor *minor) | |
245 | { | |
246 | struct drm_device *dev = minor->dev; | |
247 | int ret; | |
248 | ||
249 | ret = drm_debugfs_create_files(etnaviv_debugfs_list, | |
250 | ARRAY_SIZE(etnaviv_debugfs_list), | |
251 | minor->debugfs_root, minor); | |
252 | ||
253 | if (ret) { | |
254 | dev_err(dev->dev, "could not install etnaviv_debugfs_list\n"); | |
255 | return ret; | |
256 | } | |
257 | ||
258 | return ret; | |
259 | } | |
260 | ||
/* Remove the debugfs files registered in etnaviv_debugfs_init(). */
static void etnaviv_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(etnaviv_debugfs_list,
			ARRAY_SIZE(etnaviv_debugfs_list), minor);
}
266 | #endif | |
267 | ||
268 | /* | |
269 | * DRM ioctls: | |
270 | */ | |
271 | ||
272 | static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data, | |
273 | struct drm_file *file) | |
274 | { | |
275 | struct etnaviv_drm_private *priv = dev->dev_private; | |
276 | struct drm_etnaviv_param *args = data; | |
277 | struct etnaviv_gpu *gpu; | |
278 | ||
279 | if (args->pipe >= ETNA_MAX_PIPES) | |
280 | return -EINVAL; | |
281 | ||
282 | gpu = priv->gpu[args->pipe]; | |
283 | if (!gpu) | |
284 | return -ENXIO; | |
285 | ||
286 | return etnaviv_gpu_get_param(gpu, args->param, &args->value); | |
287 | } | |
288 | ||
289 | static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data, | |
290 | struct drm_file *file) | |
291 | { | |
292 | struct drm_etnaviv_gem_new *args = data; | |
293 | ||
294 | if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED | | |
295 | ETNA_BO_FORCE_MMU)) | |
296 | return -EINVAL; | |
297 | ||
298 | return etnaviv_gem_new_handle(dev, file, args->size, | |
299 | args->flags, &args->handle); | |
300 | } | |
301 | ||
/*
 * Build a struct timespec compound literal from any value with tv_sec and
 * tv_nsec members (used to convert the uapi timeout structs).  The literal
 * lives for the enclosing block, so taking its address is valid.
 */
#define TS(t) ((struct timespec){ \
	.tv_sec = (t).tv_sec, \
	.tv_nsec = (t).tv_nsec \
})
306 | ||
/*
 * ETNAVIV_GEM_CPU_PREP ioctl: validate the op mask, look up the BO and
 * forward to etnaviv_gem_cpu_prep() with the converted timeout.  The BO
 * reference taken by the lookup is dropped before returning.
 */
static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
327 | ||
/*
 * ETNAVIV_GEM_CPU_FINI ioctl: end a CPU access window on a BO.  flags is
 * reserved and must be zero.  The lookup reference is dropped on return.
 */
static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->flags)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_fini(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
348 | ||
/*
 * ETNAVIV_GEM_INFO ioctl: return the fake mmap offset of a BO handle in
 * args->offset.  pad is reserved and must be zero.
 */
static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
368 | ||
/*
 * ETNAVIV_WAIT_FENCE ioctl: wait for a fence on the selected pipe.  With
 * ETNA_WAIT_NONBLOCK the timeout pointer is passed as NULL — presumably a
 * poll-only check; confirm in etnaviv_gpu_wait_fence_interruptible().
 * The TS() compound literal is valid for the whole function body.
 */
static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_wait_fence *args = data;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct timespec *timeout = &TS(args->timeout);
	struct etnaviv_gpu *gpu;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
						    timeout);
}
393 | ||
394 | static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data, | |
395 | struct drm_file *file) | |
396 | { | |
397 | struct drm_etnaviv_gem_userptr *args = data; | |
398 | int access; | |
399 | ||
400 | if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) || | |
401 | args->flags == 0) | |
402 | return -EINVAL; | |
403 | ||
404 | if (offset_in_page(args->user_ptr | args->user_size) || | |
405 | (uintptr_t)args->user_ptr != args->user_ptr || | |
406 | (u32)args->user_size != args->user_size || | |
407 | args->user_ptr & ~PAGE_MASK) | |
408 | return -EINVAL; | |
409 | ||
410 | if (args->flags & ETNA_USERPTR_WRITE) | |
411 | access = VERIFY_WRITE; | |
412 | else | |
413 | access = VERIFY_READ; | |
414 | ||
415 | if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr, | |
416 | args->user_size)) | |
417 | return -EFAULT; | |
418 | ||
419 | return etnaviv_gem_new_userptr(dev, file, args->user_ptr, | |
420 | args->user_size, args->flags, | |
421 | &args->handle); | |
422 | } | |
423 | ||
/*
 * ETNAVIV_GEM_WAIT ioctl: wait until the GPU on the selected pipe is done
 * with the given BO.  ETNA_WAIT_NONBLOCK passes a NULL timeout.  The BO
 * reference from the lookup is dropped before returning.
 */
static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_wait *args = data;
	struct timespec *timeout = &TS(args->timeout);
	struct drm_gem_object *obj;
	struct etnaviv_gpu *gpu;
	int ret;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
457 | ||
/* Ioctl table: every entry requires auth and is render-node capable. */
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM, get_param, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW, gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO, gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
};
471 | ||
/* VM ops for mmap'ed GEM objects: etnaviv serves faults, DRM refcounts. */
static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
477 | ||
/* Standard DRM file operations; only mmap is driver specific. */
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = etnaviv_gem_mmap,
};
491 | ||
/*
 * DRM driver description: GEM + PRIME render driver (no modesetting).
 * PRIME import/export uses the generic DRM helpers with etnaviv-specific
 * pin/unpin, sg-table and vmap hooks.
 */
static struct drm_driver etnaviv_drm_driver = {
	.driver_features    = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER,
	.open               = etnaviv_open,
	.preclose           = etnaviv_preclose,
	.set_busid          = drm_platform_set_busid,
	.gem_free_object    = etnaviv_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_pin      = etnaviv_gem_prime_pin,
	.gem_prime_unpin    = etnaviv_gem_prime_unpin,
	.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
	.gem_prime_vmap     = etnaviv_gem_prime_vmap,
	.gem_prime_vunmap   = etnaviv_gem_prime_vunmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = etnaviv_debugfs_init,
	.debugfs_cleanup    = etnaviv_debugfs_cleanup,
#endif
	.ioctls             = etnaviv_ioctls,
	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "etnaviv",
	.desc               = "etnaviv DRM",
	.date               = "20151214",
	.major              = 1,
	.minor              = 0,
};
525 | ||
526 | /* | |
527 | * Platform driver: | |
528 | */ | |
/*
 * Component-master bind: allocate the DRM device and driver-private
 * state, create the ordered workqueue, bind all GPU core components
 * (which fill priv->gpu[]), initialise the hardware and register with
 * DRM.  Unwinds in reverse on any failure.
 */
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (!drm)
		return -ENOMEM;

	drm->platformdev = to_platform_device(dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_unref;
	}
	drm->dev_private = priv;

	/* Ordered workqueue: queued work is serialised per device. */
	priv->wq = alloc_ordered_workqueue("etnaviv", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto out_wq;
	}

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;

	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_bind;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_register;

	return 0;

	/*
	 * NOTE(review): each label is named after the step that failed, so
	 * every label undoes only the steps completed *before* it.
	 */
out_register:
	component_unbind_all(dev, drm);
out_bind:
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);
out_wq:
	kfree(priv);
out_unref:
	drm_dev_unref(drm);

	return ret;
}
585 | ||
/*
 * Component-master unbind: mirror of etnaviv_bind() — unregister from
 * DRM, drain and destroy the workqueue, unbind the GPU components, free
 * the private data and drop the device.
 */
static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	component_unbind_all(dev, drm);

	drm->dev_private = NULL;
	kfree(priv);

	drm_put_dev(drm);
}
603 | ||
/* Component-master callbacks for the aggregate etnaviv device. */
static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};
608 | ||
609 | static int compare_of(struct device *dev, void *data) | |
610 | { | |
611 | struct device_node *np = data; | |
612 | ||
613 | return dev->of_node == np; | |
614 | } | |
615 | ||
/* Component match callback: match a device by its name string. */
static int compare_str(struct device *dev, void *data)
{
	return strcmp(dev_name(dev), data) == 0;
}
620 | ||
/*
 * Probe the master platform device: build a component match list from
 * either the "cores" OF phandle list or a NULL-terminated array of device
 * names in platform_data, then register the component master.
 */
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct component_match *match = NULL;

	/*
	 * NOTE(review): return value ignored — confirm a 32-bit coherent
	 * mask is always accepted on the supported platforms.
	 */
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	if (node) {
		struct device_node *core_node;
		int i;

		for (i = 0; ; i++) {
			core_node = of_parse_phandle(node, "cores", i);
			if (!core_node)
				break;

			/*
			 * NOTE(review): the node reference is dropped here
			 * while the match list keeps the pointer — verify
			 * compare_of only ever compares, never dereferences,
			 * a possibly-stale node.
			 */
			component_match_add(&pdev->dev, &match, compare_of,
					    core_node);
			of_node_put(core_node);
		}
	} else if (dev->platform_data) {
		char **names = dev->platform_data;
		unsigned i;

		for (i = 0; names[i]; i++)
			component_match_add(dev, &match, compare_str, names[i]);
	}

	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}
652 | ||
/* Remove the master device; unbinds the aggregate device if bound. */
static int etnaviv_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &etnaviv_master_ops);

	return 0;
}
659 | ||
/* OF match table for the etnaviv GPU subsystem (master) node. */
static const struct of_device_id dt_match[] = {
	{ .compatible = "fsl,imx-gpu-subsystem" },
	{ .compatible = "marvell,dove-gpu-subsystem" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, dt_match);
666 | ||
/* Platform driver for the master device; actual setup is component based. */
static struct platform_driver etnaviv_platform_driver = {
	.probe      = etnaviv_pdev_probe,
	.remove     = etnaviv_pdev_remove,
	.driver     = {
		.name   = "etnaviv",
		.of_match_table = dt_match,
	},
};
675 | ||
676 | static int __init etnaviv_init(void) | |
677 | { | |
678 | int ret; | |
679 | ||
680 | etnaviv_validate_init(); | |
681 | ||
682 | ret = platform_driver_register(&etnaviv_gpu_driver); | |
683 | if (ret != 0) | |
684 | return ret; | |
685 | ||
686 | ret = platform_driver_register(&etnaviv_platform_driver); | |
687 | if (ret != 0) | |
688 | platform_driver_unregister(&etnaviv_gpu_driver); | |
689 | ||
690 | return ret; | |
691 | } | |
692 | module_init(etnaviv_init); | |
693 | ||
694 | static void __exit etnaviv_exit(void) | |
695 | { | |
696 | platform_driver_unregister(&etnaviv_gpu_driver); | |
697 | platform_driver_unregister(&etnaviv_platform_driver); | |
698 | } | |
699 | module_exit(etnaviv_exit); | |
700 | ||
/* Module metadata. */
MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");