// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pxa3xx-gcu.c - Linux kernel module for PXA3xx graphics controllers
 *
 * This driver needs a DirectFB counterpart in user space, communication
 * is handled via mmap()ed memory areas and an ioctl.
 *
 * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
 * Copyright (c) 2009 Janine Kropp <nin@directfb.org>
 * Copyright (c) 2009 Denis Oliver Kropp <dok@directfb.org>
 */

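/*
 * Rough, untested sketch of the userspace side described above. It assumes
 * the misc device appears as /dev/pxa3xx-gcu (the node name depends on the
 * udev setup) and that userspace knows the page-aligned size of
 * struct pxa3xx_gcu_shared used by the kernel ("shared_size" below):
 *
 *	int fd = open("/dev/pxa3xx-gcu", O_RDWR);
 *	struct pxa3xx_gcu_shared *shared =
 *		mmap(NULL, shared_size, PROT_READ | PROT_WRITE,
 *		     MAP_SHARED, fd, 0);
 *
 *	uint32_t batch[64];			// < PXA3XX_GCU_BATCH_WORDS words
 *	// ... fill batch[] with GCU commands ...
 *	write(fd, batch, sizeof(batch));	// queue one batch
 *	ioctl(fd, PXA3XX_GCU_IOCTL_WAIT_IDLE);	// wait until the GCU is idle
 */
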
/*
 * WARNING: This controller is attached to System Bus 2 of the PXA which
 * needs its arbiter to be enabled explicitly (CKENB & 1<<9).
 * There is currently no way to do this from Linux, so you need to teach
 * your bootloader for now.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ioctl.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/of.h>

#include "pxa3xx-gcu.h"

#define DRV_NAME	"pxa3xx-gcu"
#define MISCDEV_MINOR	197

#define REG_GCCR	0x00
#define GCCR_SYNC_CLR	(1 << 9)
#define GCCR_BP_RST	(1 << 8)
#define GCCR_ABORT	(1 << 6)
#define GCCR_STOP	(1 << 4)

#define REG_GCISCR	0x04
#define REG_GCIECR	0x08
#define REG_GCRBBR	0x20
#define REG_GCRBLR	0x24
#define REG_GCRBHR	0x28
#define REG_GCRBTR	0x2C
#define REG_GCRBEXHR	0x30

#define IE_EOB		(1 << 0)
#define IE_EEOB		(1 << 5)
#define IE_ALL		0xff

#define SHARED_SIZE	PAGE_ALIGN(sizeof(struct pxa3xx_gcu_shared))

/* #define PXA3XX_GCU_DEBUG */
/* #define PXA3XX_GCU_DEBUG_TIMER */

#ifdef PXA3XX_GCU_DEBUG
#define QDUMP(msg) \
	do { \
		QPRINT(priv, KERN_DEBUG, msg); \
	} while (0)
#else
#define QDUMP(msg)	do {} while (0)
#endif

#define QERROR(msg) \
	do { \
		QPRINT(priv, KERN_ERR, msg); \
	} while (0)

struct pxa3xx_gcu_batch {
	struct pxa3xx_gcu_batch *next;
	u32 *ptr;
	dma_addr_t phys;
	unsigned long length;
};

struct pxa3xx_gcu_priv {
	struct device *dev;
	void __iomem *mmio_base;
	struct clk *clk;
	struct pxa3xx_gcu_shared *shared;
	dma_addr_t shared_phys;
	struct resource *resource_mem;
	struct miscdevice misc_dev;
	wait_queue_head_t wait_idle;
	wait_queue_head_t wait_free;
	spinlock_t spinlock;
	struct timespec64 base_time;

	struct pxa3xx_gcu_batch *free;
	struct pxa3xx_gcu_batch *ready;
	struct pxa3xx_gcu_batch *ready_last;
	struct pxa3xx_gcu_batch *running;
};

static inline unsigned long
gc_readl(struct pxa3xx_gcu_priv *priv, unsigned int off)
{
	return __raw_readl(priv->mmio_base + off);
}

static inline void
gc_writel(struct pxa3xx_gcu_priv *priv, unsigned int off, unsigned long val)
{
	__raw_writel(val, priv->mmio_base + off);
}

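/*
 * QPRINT emits one timestamped status line: the interrupt status register
 * (STATUS), the ring buffer base and length (B [...]), and the GCRBEXHR,
 * GCRBHR and GCRBTR ring pointers (E/H/T) printed as 32-bit word offsets
 * relative to the ring base.
 */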
#define QPRINT(priv, level, msg) \
	do { \
		struct timespec64 ts; \
		struct pxa3xx_gcu_shared *shared = priv->shared; \
		u32 base = gc_readl(priv, REG_GCRBBR); \
		\
		ktime_get_ts64(&ts); \
		ts = timespec64_sub(ts, priv->base_time); \
		\
		printk(level "%lld.%03ld.%03ld - %-17s: %-21s (%s, " \
			"STATUS " \
			"0x%02lx, B 0x%08lx [%ld], E %5ld, H %5ld, " \
			"T %5ld)\n", \
			(s64)(ts.tv_sec), \
			ts.tv_nsec / NSEC_PER_MSEC, \
			(ts.tv_nsec % NSEC_PER_MSEC) / USEC_PER_MSEC, \
			__func__, msg, \
			shared->hw_running ? "running" : " idle", \
			gc_readl(priv, REG_GCISCR), \
			gc_readl(priv, REG_GCRBBR), \
			gc_readl(priv, REG_GCRBLR), \
			(gc_readl(priv, REG_GCRBEXHR) - base) / 4, \
			(gc_readl(priv, REG_GCRBHR) - base) / 4, \
			(gc_readl(priv, REG_GCRBTR) - base) / 4); \
	} while (0)

static void
pxa3xx_gcu_reset(struct pxa3xx_gcu_priv *priv)
{
	QDUMP("RESET");

	/* disable interrupts */
	gc_writel(priv, REG_GCIECR, 0);

	/* reset hardware */
	gc_writel(priv, REG_GCCR, GCCR_ABORT);
	gc_writel(priv, REG_GCCR, 0);

	memset(priv->shared, 0, SHARED_SIZE);
	priv->shared->buffer_phys = priv->shared_phys;
	priv->shared->magic = PXA3XX_GCU_SHARED_MAGIC;

	ktime_get_ts64(&priv->base_time);

	/* set up the ring buffer pointers */
	gc_writel(priv, REG_GCRBLR, 0);
	gc_writel(priv, REG_GCRBBR, priv->shared_phys);
	gc_writel(priv, REG_GCRBTR, priv->shared_phys);

	/* enable all IRQs except EOB */
	gc_writel(priv, REG_GCIECR, IE_ALL & ~IE_EOB);
}

static void
dump_whole_state(struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_shared *sh = priv->shared;
	u32 base = gc_readl(priv, REG_GCRBBR);

	QDUMP("DUMP");

	printk(KERN_DEBUG "== PXA3XX-GCU DUMP ==\n"
		"%s, STATUS 0x%02lx, B 0x%08lx [%ld], E %5ld, H %5ld, T %5ld\n",
		sh->hw_running ? "running" : "idle ",
		gc_readl(priv, REG_GCISCR),
		gc_readl(priv, REG_GCRBBR),
		gc_readl(priv, REG_GCRBLR),
		(gc_readl(priv, REG_GCRBEXHR) - base) / 4,
		(gc_readl(priv, REG_GCRBHR) - base) / 4,
		(gc_readl(priv, REG_GCRBTR) - base) / 4);
}

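/* Return all batches on the running list to the free list. Only called
 * from the interrupt handler, with priv->spinlock held. */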
static void
flush_running(struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *running = priv->running;
	struct pxa3xx_gcu_batch *next;

	while (running) {
		next = running->next;
		running->next = priv->free;
		priv->free = running;
		running = next;
	}

	priv->running = NULL;
}

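/*
 * Build a fresh execution ring in the shared DMA buffer out of all queued
 * "ready" batches, move them to the running list and reprogram the ring
 * base, tail and length registers to kick off the hardware. The ring length
 * is rounded up to a multiple of 64 words. Must be called with
 * priv->spinlock held and at least one batch on the ready list.
 */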
static void
run_ready(struct pxa3xx_gcu_priv *priv)
{
	unsigned int num = 0;
	struct pxa3xx_gcu_shared *shared = priv->shared;
	struct pxa3xx_gcu_batch *ready = priv->ready;

	QDUMP("Start");

	BUG_ON(!ready);

	shared->buffer[num++] = 0x05000000;

	while (ready) {
		shared->buffer[num++] = 0x00000001;
		shared->buffer[num++] = ready->phys;
		ready = ready->next;
	}

	shared->buffer[num++] = 0x05000000;
	priv->running = priv->ready;
	priv->ready = priv->ready_last = NULL;
	gc_writel(priv, REG_GCRBLR, 0);
	shared->hw_running = 1;

	/* ring base address */
	gc_writel(priv, REG_GCRBBR, shared->buffer_phys);

	/* ring tail address */
	gc_writel(priv, REG_GCRBTR, shared->buffer_phys + num * 4);

	/* ring length */
	gc_writel(priv, REG_GCRBLR, ((num + 63) & ~63) * 4);
}

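/*
 * Interrupt handler. An EEOB interrupt means all previously submitted
 * batches have finished executing: recycle their buffers, wake any writer
 * waiting for a free buffer, and then either start the next set of ready
 * batches or mark the unit idle and wake up idle waiters.
 */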
static irqreturn_t
pxa3xx_gcu_handle_irq(int irq, void *ctx)
{
	struct pxa3xx_gcu_priv *priv = ctx;
	struct pxa3xx_gcu_shared *shared = priv->shared;
	u32 status = gc_readl(priv, REG_GCISCR) & IE_ALL;

	QDUMP("-Interrupt");

	if (!status)
		return IRQ_NONE;

	spin_lock(&priv->spinlock);
	shared->num_interrupts++;

	if (status & IE_EEOB) {
		QDUMP(" [EEOB]");

		flush_running(priv);
		wake_up_all(&priv->wait_free);

		if (priv->ready) {
			run_ready(priv);
		} else {
			/* There is no more data prepared by the userspace.
			 * Set hw_running = 0 and wait for the next userspace
			 * kick-off */
			shared->num_idle++;
			shared->hw_running = 0;

			QDUMP(" '-> Idle.");

			/* set ring buffer length to zero */
			gc_writel(priv, REG_GCRBLR, 0);

			wake_up_all(&priv->wait_idle);
		}

		shared->num_done++;
	} else {
		QERROR(" [???]");
		dump_whole_state(priv);
	}

	/* Clear the interrupt */
	gc_writel(priv, REG_GCISCR, status);
	spin_unlock(&priv->spinlock);

	return IRQ_HANDLED;
}

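/*
 * Sleep until the hardware reports idle. A 4 second timeout is only treated
 * as fatal if neither the GCRBEXHR execution pointer nor the interrupt
 * count advanced in the meantime, i.e. the hardware appears genuinely
 * stuck; in that case -ETIMEDOUT is returned.
 */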
static int
pxa3xx_gcu_wait_idle(struct pxa3xx_gcu_priv *priv)
{
	int ret = 0;

	QDUMP("Waiting for idle...");

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_wait_idle++;

	while (priv->shared->hw_running) {
		int num = priv->shared->num_interrupts;
		u32 rbexhr = gc_readl(priv, REG_GCRBEXHR);

		ret = wait_event_interruptible_timeout(priv->wait_idle,
					!priv->shared->hw_running, HZ*4);

		if (ret != 0)
			break;

		if (gc_readl(priv, REG_GCRBEXHR) == rbexhr &&
		    priv->shared->num_interrupts == num) {
			QERROR("TIMEOUT");
			ret = -ETIMEDOUT;
			break;
		}
	}

	QDUMP("done");

	return ret;
}

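/*
 * Sleep until a free batch buffer is available, using the same
 * stall-detection scheme as pxa3xx_gcu_wait_idle() above.
 */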
static int
pxa3xx_gcu_wait_free(struct pxa3xx_gcu_priv *priv)
{
	int ret = 0;

	QDUMP("Waiting for free...");

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_wait_free++;

	while (!priv->free) {
		u32 rbexhr = gc_readl(priv, REG_GCRBEXHR);

		ret = wait_event_interruptible_timeout(priv->wait_free,
					priv->free, HZ*4);

		if (ret < 0)
			break;

		if (ret > 0)
			continue;

		if (gc_readl(priv, REG_GCRBEXHR) == rbexhr) {
			QERROR("TIMEOUT");
			ret = -ETIMEDOUT;
			break;
		}
	}

	QDUMP("done");

	return ret;
}

/* Misc device layer */

static inline struct pxa3xx_gcu_priv *to_pxa3xx_gcu_priv(struct file *file)
{
	struct miscdevice *dev = file->private_data;
	return container_of(dev, struct pxa3xx_gcu_priv, misc_dev);
}

/*
 * provide an empty .open callback, so the core sets file->private_data
 * for us.
 */
static int pxa3xx_gcu_open(struct inode *inode, struct file *file)
{
	return 0;
}

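/*
 * write() queues one batch: the user data (at most
 * PXA3XX_GCU_BATCH_WORDS - 1 words) is copied into a free batch buffer,
 * terminated with the 0x01000000 end-of-batch command and appended to the
 * ready list. If the hardware is idle it is kicked immediately, otherwise
 * the batch is picked up by the EEOB interrupt handler.
 */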
static ssize_t
pxa3xx_gcu_write(struct file *file, const char *buff,
		 size_t count, loff_t *offp)
{
	int ret;
	unsigned long flags;
	struct pxa3xx_gcu_batch *buffer;
	struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);

	int words = count / 4;

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_writes++;
	priv->shared->num_words += words;

	/* Last word reserved for batch buffer end command */
	if (words >= PXA3XX_GCU_BATCH_WORDS)
		return -E2BIG;

	/* Wait for a free buffer */
	if (!priv->free) {
		ret = pxa3xx_gcu_wait_free(priv);
		if (ret < 0)
			return ret;
	}

	/*
	 * Get buffer from free list
	 */
	spin_lock_irqsave(&priv->spinlock, flags);
	buffer = priv->free;
	priv->free = buffer->next;
	spin_unlock_irqrestore(&priv->spinlock, flags);

	/* Copy data from user into buffer */
	ret = copy_from_user(buffer->ptr, buff, words * 4);
	if (ret) {
		spin_lock_irqsave(&priv->spinlock, flags);
		buffer->next = priv->free;
		priv->free = buffer;
		spin_unlock_irqrestore(&priv->spinlock, flags);
		return -EFAULT;
	}

	buffer->length = words;

	/* Append batch buffer end command */
	buffer->ptr[words] = 0x01000000;

	/*
	 * Add buffer to ready list
	 */
	spin_lock_irqsave(&priv->spinlock, flags);

	buffer->next = NULL;

	if (priv->ready) {
		BUG_ON(priv->ready_last == NULL);

		priv->ready_last->next = buffer;
	} else
		priv->ready = buffer;

	priv->ready_last = buffer;

	if (!priv->shared->hw_running)
		run_ready(priv);

	spin_unlock_irqrestore(&priv->spinlock, flags);

	return words * 4;
}

static long
pxa3xx_gcu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	unsigned long flags;
	struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);

	switch (cmd) {
	case PXA3XX_GCU_IOCTL_RESET:
		spin_lock_irqsave(&priv->spinlock, flags);
		pxa3xx_gcu_reset(priv);
		spin_unlock_irqrestore(&priv->spinlock, flags);
		return 0;

	case PXA3XX_GCU_IOCTL_WAIT_IDLE:
		return pxa3xx_gcu_wait_idle(priv);
	}

	return -ENOSYS;
}

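/*
 * Two mappings are offered: page offset 0 maps the shared status/ring area
 * (the mapping must cover exactly SHARED_SIZE bytes), and page offset
 * SHARED_SIZE >> PAGE_SHIFT maps the register window uncached for direct
 * access from userspace.
 */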
static int
pxa3xx_gcu_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned int size = vma->vm_end - vma->vm_start;
	struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);

	switch (vma->vm_pgoff) {
	case 0:
		/* hand out the shared data area */
		if (size != SHARED_SIZE)
			return -EINVAL;

		return dma_mmap_coherent(priv->dev, vma,
			priv->shared, priv->shared_phys, size);

	case SHARED_SIZE >> PAGE_SHIFT:
		/* hand out the MMIO base for direct register access
		 * from userspace */
		if (size != resource_size(priv->resource_mem))
			return -EINVAL;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		return io_remap_pfn_range(vma, vma->vm_start,
				priv->resource_mem->start >> PAGE_SHIFT,
				size, vma->vm_page_prot);
	}

	return -EINVAL;
}

#ifdef PXA3XX_GCU_DEBUG_TIMER
static struct timer_list pxa3xx_gcu_debug_timer;
static struct pxa3xx_gcu_priv *debug_timer_priv;

static void pxa3xx_gcu_debug_timedout(struct timer_list *unused)
{
	struct pxa3xx_gcu_priv *priv = debug_timer_priv;

	QERROR("Timer DUMP");

	mod_timer(&pxa3xx_gcu_debug_timer, jiffies + 5 * HZ);
}

static void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv)
{
	/* init the timer structure */
	debug_timer_priv = priv;
	timer_setup(&pxa3xx_gcu_debug_timer, pxa3xx_gcu_debug_timedout, 0);
	pxa3xx_gcu_debug_timedout(NULL);
}
#else
static inline void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv) {}
#endif

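/*
 * Allocate one DMA-coherent batch buffer of PXA3XX_GCU_BATCH_WORDS words
 * and put it on the free list; probe() builds a pool of eight of these
 * up front.
 */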
static int
pxa3xx_gcu_add_buffer(struct device *dev,
		      struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *buffer;

	buffer = kzalloc(sizeof(struct pxa3xx_gcu_batch), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->ptr = dma_alloc_coherent(dev, PXA3XX_GCU_BATCH_WORDS * 4,
					 &buffer->phys, GFP_KERNEL);
	if (!buffer->ptr) {
		kfree(buffer);
		return -ENOMEM;
	}

	buffer->next = priv->free;
	priv->free = buffer;

	return 0;
}

static void
pxa3xx_gcu_free_buffers(struct device *dev,
			struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *next, *buffer = priv->free;

	while (buffer) {
		next = buffer->next;

		dma_free_coherent(dev, PXA3XX_GCU_BATCH_WORDS * 4,
				  buffer->ptr, buffer->phys);

		kfree(buffer);
		buffer = next;
	}

	priv->free = NULL;
}

static const struct file_operations pxa3xx_gcu_miscdev_fops = {
	.owner = THIS_MODULE,
	.open = pxa3xx_gcu_open,
	.write = pxa3xx_gcu_write,
	.unlocked_ioctl = pxa3xx_gcu_ioctl,
	.mmap = pxa3xx_gcu_mmap,
};

static int pxa3xx_gcu_probe(struct platform_device *pdev)
{
	int i, ret, irq;
	struct resource *r;
	struct pxa3xx_gcu_priv *priv;
	struct device *dev = &pdev->dev;

	priv = devm_kzalloc(dev, sizeof(struct pxa3xx_gcu_priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	init_waitqueue_head(&priv->wait_idle);
	init_waitqueue_head(&priv->wait_free);
	spin_lock_init(&priv->spinlock);

	/* we allocate the misc device structure as part of our own allocation,
	 * so we can get a pointer to our priv structure later on with
	 * container_of(). This isn't really necessary as we have a fixed minor
	 * number anyway, but this is to avoid statics. */

	priv->misc_dev.minor = MISCDEV_MINOR,
	priv->misc_dev.name = DRV_NAME,
	priv->misc_dev.fops = &pxa3xx_gcu_miscdev_fops;

	/* handle IO resources */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->mmio_base = devm_ioremap_resource(dev, r);
	if (IS_ERR(priv->mmio_base))
		return PTR_ERR(priv->mmio_base);

	/* enable the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	/* request the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no IRQ defined: %d\n", irq);
		return irq;
	}

	ret = devm_request_irq(dev, irq, pxa3xx_gcu_handle_irq,
			       0, DRV_NAME, priv);
	if (ret < 0) {
		dev_err(dev, "request_irq failed\n");
		return ret;
	}

	/* allocate dma memory */
	priv->shared = dma_alloc_coherent(dev, SHARED_SIZE,
					  &priv->shared_phys, GFP_KERNEL);
	if (!priv->shared) {
		dev_err(dev, "failed to allocate DMA memory\n");
		return -ENOMEM;
	}

	/* register misc device */
	ret = misc_register(&priv->misc_dev);
	if (ret < 0) {
		dev_err(dev, "misc_register() for minor %d failed\n",
			MISCDEV_MINOR);
		goto err_free_dma;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret < 0) {
		dev_err(dev, "failed to enable clock\n");
		goto err_misc_deregister;
	}

	for (i = 0; i < 8; i++) {
		ret = pxa3xx_gcu_add_buffer(dev, priv);
		if (ret) {
			dev_err(dev, "failed to allocate DMA memory\n");
			goto err_disable_clk;
		}
	}

	platform_set_drvdata(pdev, priv);
	priv->resource_mem = r;
	priv->dev = dev;
	pxa3xx_gcu_reset(priv);
	pxa3xx_gcu_init_debug_timer(priv);

	dev_info(dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n",
			(void *) r->start, (void *) priv->shared_phys,
			SHARED_SIZE, irq);
	return 0;

err_disable_clk:
	clk_disable_unprepare(priv->clk);

err_misc_deregister:
	misc_deregister(&priv->misc_dev);

err_free_dma:
	dma_free_coherent(dev, SHARED_SIZE,
			priv->shared, priv->shared_phys);

	return ret;
}

static int pxa3xx_gcu_remove(struct platform_device *pdev)
{
	struct pxa3xx_gcu_priv *priv = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	pxa3xx_gcu_wait_idle(priv);
	misc_deregister(&priv->misc_dev);
	dma_free_coherent(dev, SHARED_SIZE, priv->shared, priv->shared_phys);
	pxa3xx_gcu_free_buffers(dev, priv);

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id pxa3xx_gcu_of_match[] = {
	{ .compatible = "marvell,pxa300-gcu", },
	{ }
};
MODULE_DEVICE_TABLE(of, pxa3xx_gcu_of_match);
#endif

static struct platform_driver pxa3xx_gcu_driver = {
	.probe = pxa3xx_gcu_probe,
	.remove = pxa3xx_gcu_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(pxa3xx_gcu_of_match),
	},
};

module_platform_driver(pxa3xx_gcu_driver);

MODULE_DESCRIPTION("PXA3xx graphics controller unit driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(MISCDEV_MINOR);
MODULE_AUTHOR("Janine Kropp <nin@directfb.org>, "
	      "Denis Oliver Kropp <dok@directfb.org>, "
	      "Daniel Mack <daniel@caiaq.de>");