/* vmu-flash.c
 * Driver for SEGA Dreamcast Visual Memory Unit
 *
 * Copyright (c) Adrian McMenamin 2002 - 2009
 * Copyright (c) Paul Mundt 2001
 *
 * Licensed under version 2 of the
 * GNU General Public Licence
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>

struct vmu_cache {
        unsigned char *buffer;          /* Cache */
        unsigned int block;             /* Which block was cached */
        unsigned long jiffies_atc;      /* When was it cached? */
        int valid;
};

struct mdev_part {
        struct maple_device *mdev;
        int partition;
};

struct vmupart {
        u16 user_blocks;
        u16 root_block;
        u16 numblocks;
        char *name;
        struct vmu_cache *pcache;
};

struct memcard {
        u16 tempA;              /* user block count from the GETMINFO reply */
        u16 tempB;              /* root block number from the GETMINFO reply */
        u32 partitions;
        u32 blocklen;
        u32 writecnt;           /* number of write phases per block */
        u32 readcnt;            /* number of read phases per block */
        u32 removeable;
        int partition;
        int read;
        unsigned char *blockread;
        struct vmupart *parts;
        struct mtd_info *mtd;
};

struct vmu_block {
        unsigned int num; /* block number */
        unsigned int ofs; /* block offset */
};

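/* Translate a byte offset within a partition into a block number and an
 * offset inside that block. Returns a kmalloc'd vmu_block which the caller
 * must kfree, or NULL if the offset is out of range or allocation fails.
 */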
static struct vmu_block *ofs_to_block(unsigned long src_ofs,
        struct mtd_info *mtd, int partition)
{
        struct vmu_block *vblock;
        struct maple_device *mdev;
        struct memcard *card;
        struct mdev_part *mpart;
        int num;

        mpart = mtd->priv;
        mdev = mpart->mdev;
        card = maple_get_drvdata(mdev);

        if (src_ofs >= card->parts[partition].numblocks * card->blocklen)
                goto failed;

        num = src_ofs / card->blocklen;
        if (num > card->parts[partition].numblocks)
                goto failed;

        vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL);
        if (!vblock)
                goto failed;

        vblock->num = num;
        vblock->ofs = src_ofs % card->blocklen;
        return vblock;

failed:
        return NULL;
}

/* Maple bus callback function for reads */
static void vmu_blockread(struct mapleq *mq)
{
        struct maple_device *mdev;
        struct memcard *card;

        mdev = mq->dev;
        card = maple_get_drvdata(mdev);

        if (unlikely(!card->blockread))
                return;

        /* copy in the data that was read; the payload starts 12 bytes
         * into the reply buffer */
        memcpy(card->blockread, mq->recvbuf->buf + 12,
                card->blocklen/card->readcnt);
}

/* Interface with maple bus to read blocks
 * caching the results so that other parts
 * of the driver can access block reads */
static int maple_vmu_read_block(unsigned int num, unsigned char *buf,
        struct mtd_info *mtd)
{
        struct memcard *card;
        struct mdev_part *mpart;
        struct maple_device *mdev;
        int partition, error = 0, x, wait;
        unsigned char *blockread = NULL;
        struct vmu_cache *pcache;
        __be32 sendbuf;

        mpart = mtd->priv;
        mdev = mpart->mdev;
        partition = mpart->partition;
        card = maple_get_drvdata(mdev);
        pcache = card->parts[partition].pcache;
        pcache->valid = 0;

        /* prepare the cache for this block */
        if (!pcache->buffer) {
                pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL);
                if (!pcache->buffer) {
                        dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
                                " to lack of memory\n", mdev->port,
                                mdev->unit);
                        error = -ENOMEM;
                        goto outB;
                }
        }

        /*
         * Reads may be phased - again the hardware spec
         * supports this - though there may not be any devices in
         * the wild that implement it, but we will support it here
         */
        for (x = 0; x < card->readcnt; x++) {
                sendbuf = cpu_to_be32(partition << 24 | x << 16 | num);

                if (atomic_read(&mdev->busy) == 1) {
                        wait_event_interruptible_timeout(mdev->maple_wait,
                                atomic_read(&mdev->busy) == 0, HZ);
                        if (atomic_read(&mdev->busy) == 1) {
                                dev_notice(&mdev->dev, "VMU at (%d, %d)"
                                        " is busy\n", mdev->port, mdev->unit);
                                error = -EAGAIN;
                                goto outB;
                        }
                }

                atomic_set(&mdev->busy, 1);
                blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL);
                if (!blockread) {
                        error = -ENOMEM;
                        atomic_set(&mdev->busy, 0);
                        goto outB;
                }
                card->blockread = blockread;

                maple_getcond_callback(mdev, vmu_blockread, 0,
                        MAPLE_FUNC_MEMCARD);
                error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
                                MAPLE_COMMAND_BREAD, 2, &sendbuf);
                /* Very long timeouts seem to be needed when box is stressed */
                wait = wait_event_interruptible_timeout(mdev->maple_wait,
                        (atomic_read(&mdev->busy) == 0 ||
                        atomic_read(&mdev->busy) == 2), HZ * 3);
                /*
                 * The MTD layer does not handle hotplugging well,
                 * so we have to return errors when the VMU is unplugged
                 * in the middle of a read (busy == 2)
                 */
                if (error || atomic_read(&mdev->busy) == 2) {
                        if (atomic_read(&mdev->busy) == 2)
                                error = -ENXIO;
                        atomic_set(&mdev->busy, 0);
                        card->blockread = NULL;
                        goto outA;
                }
                if (wait == 0 || wait == -ERESTARTSYS) {
                        card->blockread = NULL;
                        atomic_set(&mdev->busy, 0);
                        error = -EIO;
                        list_del_init(&(mdev->mq->list));
                        kfree(mdev->mq->sendbuf);
                        mdev->mq->sendbuf = NULL;
                        if (wait == -ERESTARTSYS) {
                                dev_warn(&mdev->dev, "VMU read on (%d, %d)"
                                        " interrupted on block 0x%X\n",
                                        mdev->port, mdev->unit, num);
                        } else
                                dev_notice(&mdev->dev, "VMU read on (%d, %d)"
                                        " timed out on block 0x%X\n",
                                        mdev->port, mdev->unit, num);
                        goto outA;
                }

                memcpy(buf + (card->blocklen/card->readcnt) * x, blockread,
                        card->blocklen/card->readcnt);

                memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x,
                        card->blockread, card->blocklen/card->readcnt);
                card->blockread = NULL;
                pcache->block = num;
                pcache->jiffies_atc = jiffies;
                pcache->valid = 1;
                kfree(blockread);
        }

        return error;

outA:
        kfree(blockread);
outB:
        return error;
}

/* communicate with maple bus for phased writing */
static int maple_vmu_write_block(unsigned int num, const unsigned char *buf,
        struct mtd_info *mtd)
{
        struct memcard *card;
        struct mdev_part *mpart;
        struct maple_device *mdev;
        int partition, error, locking, x, phaselen, wait;
        __be32 *sendbuf;

        mpart = mtd->priv;
        mdev = mpart->mdev;
        partition = mpart->partition;
        card = maple_get_drvdata(mdev);

        phaselen = card->blocklen/card->writecnt;

        sendbuf = kmalloc(phaselen + 4, GFP_KERNEL);
        if (!sendbuf) {
                error = -ENOMEM;
                goto fail_nosendbuf;
        }
        for (x = 0; x < card->writecnt; x++) {
                sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num);
                memcpy(&sendbuf[1], buf + phaselen * x, phaselen);
                /* wait until the device is not busy doing something else
                 * or 1 second - whichever is longer */
                if (atomic_read(&mdev->busy) == 1) {
                        wait_event_interruptible_timeout(mdev->maple_wait,
                                atomic_read(&mdev->busy) == 0, HZ);
                        if (atomic_read(&mdev->busy) == 1) {
                                error = -EBUSY;
                                dev_notice(&mdev->dev, "VMU write at (%d, %d)"
                                        " failed - device is busy\n",
                                        mdev->port, mdev->unit);
                                goto fail_nolock;
                        }
                }
                atomic_set(&mdev->busy, 1);

                locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
                        MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf);
                wait = wait_event_interruptible_timeout(mdev->maple_wait,
                        atomic_read(&mdev->busy) == 0, HZ/10);
                if (locking) {
                        error = -EIO;
                        atomic_set(&mdev->busy, 0);
                        goto fail_nolock;
                }
                if (atomic_read(&mdev->busy) == 2) {
                        atomic_set(&mdev->busy, 0);
                } else if (wait == 0 || wait == -ERESTARTSYS) {
                        error = -EIO;
                        dev_warn(&mdev->dev, "Write at (%d, %d) of block"
                                " 0x%X at phase %d failed: could not"
                                " communicate with VMU\n", mdev->port,
                                mdev->unit, num, x);
                        atomic_set(&mdev->busy, 0);
                        kfree(mdev->mq->sendbuf);
                        mdev->mq->sendbuf = NULL;
                        list_del_init(&(mdev->mq->list));
                        goto fail_nolock;
                }
        }
        kfree(sendbuf);

        return card->blocklen;

fail_nolock:
        kfree(sendbuf);
fail_nosendbuf:
        dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
                mdev->unit);
        return error;
}

/* mtd function to simulate reading byte by byte */
static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval,
        struct mtd_info *mtd)
{
        struct vmu_block *vblock;
        struct memcard *card;
        struct mdev_part *mpart;
        struct maple_device *mdev;
        unsigned char *buf, ret;
        int partition, error;

        mpart = mtd->priv;
        mdev = mpart->mdev;
        partition = mpart->partition;
        card = maple_get_drvdata(mdev);
        *retval = 0;

        buf = kmalloc(card->blocklen, GFP_KERNEL);
        if (!buf) {
                *retval = 1;
                ret = -ENOMEM;
                goto finish;
        }

        vblock = ofs_to_block(ofs, mtd, partition);
        if (!vblock) {
                *retval = 3;
                ret = -ENOMEM;
                goto out_buf;
        }

        error = maple_vmu_read_block(vblock->num, buf, mtd);
        if (error) {
                ret = error;
                *retval = 2;
                goto out_vblock;
        }

        ret = buf[vblock->ofs];

out_vblock:
        kfree(vblock);
out_buf:
        kfree(buf);
finish:
        return ret;
}

/* mtd higher order function to read flash */
static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
        size_t *retlen, u_char *buf)
{
        struct maple_device *mdev;
        struct memcard *card;
        struct mdev_part *mpart;
        struct vmu_cache *pcache;
        struct vmu_block *vblock;
        int index = 0, retval, partition, leftover, numblocks;
        unsigned char cx;

        if (len < 1)
                return -EIO;

        mpart = mtd->priv;
        mdev = mpart->mdev;
        partition = mpart->partition;
        card = maple_get_drvdata(mdev);

        numblocks = card->parts[partition].numblocks;
        if (from + len > numblocks * card->blocklen)
                len = numblocks * card->blocklen - from;
        if (len == 0)
                return -EIO;
        /* Have we cached this bit already? */
        pcache = card->parts[partition].pcache;
        do {
                vblock = ofs_to_block(from + index, mtd, partition);
                if (!vblock)
                        return -ENOMEM;
                /* Have we cached this and is the cache valid and timely? */
                if (pcache->valid &&
                        time_before(jiffies, pcache->jiffies_atc + HZ) &&
                        (pcache->block == vblock->num)) {
                        /* we have cached it, so do necessary copying */
                        leftover = card->blocklen - vblock->ofs;
                        if (vblock->ofs + len - index < card->blocklen) {
                                /* only a bit of this block to copy */
                                memcpy(buf + index,
                                        pcache->buffer + vblock->ofs,
                                        len - index);
                                index = len;
                        } else {
                                /* otherwise copy remainder of whole block */
                                memcpy(buf + index, pcache->buffer +
                                        vblock->ofs, leftover);
                                index += leftover;
                        }
                } else {
                        /*
                         * Not cached so read one byte -
                         * but cache the rest of the block
                         */
                        cx = vmu_flash_read_char(from + index, &retval, mtd);
                        if (retval) {
                                *retlen = index;
                                kfree(vblock);
                                return cx;
                        }
                        buf[index] = cx;
                        index++;
                }
                kfree(vblock);
        } while (len > index);
        *retlen = index;

        return 0;
}

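/* mtd higher order function to write to flash: each affected block is read
 * in, patched with the new data and then written back out in full */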
static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
        size_t *retlen, const u_char *buf)
{
        struct maple_device *mdev;
        struct memcard *card;
        struct mdev_part *mpart;
        int index = 0, partition, error = 0, numblocks;
        struct vmu_cache *pcache;
        struct vmu_block *vblock;
        unsigned char *buffer;

        mpart = mtd->priv;
        mdev = mpart->mdev;
        partition = mpart->partition;
        card = maple_get_drvdata(mdev);

        /* simple sanity checks */
        if (len < 1) {
                error = -EIO;
                goto failed;
        }
        numblocks = card->parts[partition].numblocks;
        if (to + len > numblocks * card->blocklen)
                len = numblocks * card->blocklen - to;
        if (len == 0) {
                error = -EIO;
                goto failed;
        }

        vblock = ofs_to_block(to, mtd, partition);
        if (!vblock) {
                error = -ENOMEM;
                goto failed;
        }

        buffer = kmalloc(card->blocklen, GFP_KERNEL);
        if (!buffer) {
                error = -ENOMEM;
                goto fail_buffer;
        }

        do {
                /* Read in the block we are to write to */
                error = maple_vmu_read_block(vblock->num, buffer, mtd);
                if (error)
                        goto fail_io;

                do {
                        buffer[vblock->ofs] = buf[index];
                        vblock->ofs++;
                        index++;
                        if (index >= len)
                                break;
                } while (vblock->ofs < card->blocklen);

                /* write out new buffer */
                error = maple_vmu_write_block(vblock->num, buffer, mtd);
                /* invalidate the cache */
                pcache = card->parts[partition].pcache;
                pcache->valid = 0;

                if (error != card->blocklen)
                        goto fail_io;

                vblock->num++;
                vblock->ofs = 0;
        } while (len > index);

        kfree(buffer);
        *retlen = index;
        kfree(vblock);
        return 0;

fail_io:
        kfree(buffer);
fail_buffer:
        kfree(vblock);
failed:
        dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
        return error;
}

static void vmu_flash_sync(struct mtd_info *mtd)
{
        /* Do nothing here */
}

/* Maple bus callback function to recursively query hardware details */
static void vmu_queryblocks(struct mapleq *mq)
{
        struct maple_device *mdev;
        unsigned short *res;
        struct memcard *card;
        __be32 partnum;
        struct vmu_cache *pcache;
        struct mdev_part *mpart;
        struct mtd_info *mtd_cur;
        struct vmupart *part_cur;
        int error;

        mdev = mq->dev;
        card = maple_get_drvdata(mdev);
        res = (unsigned short *) (mq->recvbuf->buf);
        card->tempA = res[12];
        card->tempB = res[6];

        dev_info(&mdev->dev, "VMU device at partition %d has %d user "
                "blocks with a root block at %d\n", card->partition,
                card->tempA, card->tempB);

        part_cur = &card->parts[card->partition];
        part_cur->user_blocks = card->tempA;
        part_cur->root_block = card->tempB;
        part_cur->numblocks = card->tempB + 1;
        part_cur->name = kmalloc(12, GFP_KERNEL);
        if (!part_cur->name)
                goto fail_name;

        sprintf(part_cur->name, "vmu%d.%d.%d",
                mdev->port, mdev->unit, card->partition);
        mtd_cur = &card->mtd[card->partition];
        mtd_cur->name = part_cur->name;
        mtd_cur->type = 8;
        mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
        mtd_cur->size = part_cur->numblocks * card->blocklen;
        mtd_cur->erasesize = card->blocklen;
        mtd_cur->write = vmu_flash_write;
        mtd_cur->read = vmu_flash_read;
        mtd_cur->sync = vmu_flash_sync;
        mtd_cur->writesize = card->blocklen;

        mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
        if (!mpart)
                goto fail_mpart;

        mpart->mdev = mdev;
        mpart->partition = card->partition;
        mtd_cur->priv = mpart;
        mtd_cur->owner = THIS_MODULE;

        pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL);
        if (!pcache)
                goto fail_cache_create;
        part_cur->pcache = pcache;

        error = add_mtd_device(mtd_cur);
        if (error)
                goto fail_mtd_register;

        maple_getcond_callback(mdev, NULL, 0,
                MAPLE_FUNC_MEMCARD);

        /*
         * Set up a recursive call to the (probably theoretical)
         * second or more partition
         */
        if (++card->partition < card->partitions) {
                partnum = cpu_to_be32(card->partition << 24);
                maple_getcond_callback(mdev, vmu_queryblocks, 0,
                        MAPLE_FUNC_MEMCARD);
                maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
                        MAPLE_COMMAND_GETMINFO, 2, &partnum);
        }
        return;

fail_mtd_register:
        dev_err(&mdev->dev, "Could not register maple device at (%d, %d)"
                " error is 0x%X\n", mdev->port, mdev->unit, error);
        for (error = 0; error <= card->partition; error++) {
                kfree(((card->parts)[error]).pcache);
                ((card->parts)[error]).pcache = NULL;
        }
fail_cache_create:
fail_mpart:
        for (error = 0; error <= card->partition; error++) {
                kfree(((card->mtd)[error]).priv);
                ((card->mtd)[error]).priv = NULL;
        }
        maple_getcond_callback(mdev, NULL, 0,
                MAPLE_FUNC_MEMCARD);
        kfree(part_cur->name);
fail_name:
        return;
}

/* Handles very basic info about the flash, queries for details */
static int __devinit vmu_connect(struct maple_device *mdev)
{
        unsigned long test_flash_data, basic_flash_data;
        int c, error;
        struct memcard *card;
        u32 partnum = 0;

        test_flash_data = be32_to_cpu(mdev->devinfo.function);
        /* Need to count how many bits are set - to find out which
         * function_data element has details of the memory card
         */
        c = hweight_long(test_flash_data);

        basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);

        card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
        if (!card) {
                error = -ENOMEM;
                goto fail_nomem;
        }

        card->partitions = (basic_flash_data >> 24 & 0xFF) + 1;
        card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
        card->writecnt = basic_flash_data >> 12 & 0xF;
        card->readcnt = basic_flash_data >> 8 & 0xF;
        card->removeable = basic_flash_data >> 7 & 1;

        card->partition = 0;

        /*
         * Not sure there are actually any multi-partition devices in the
         * real world, but the hardware supports them, so we will too
         */
        card->parts = kmalloc(sizeof(struct vmupart) * card->partitions,
                GFP_KERNEL);
        if (!card->parts) {
                error = -ENOMEM;
                goto fail_partitions;
        }

        card->mtd = kmalloc(sizeof(struct mtd_info) * card->partitions,
                GFP_KERNEL);
        if (!card->mtd) {
                error = -ENOMEM;
                goto fail_mtd_info;
        }

        maple_set_drvdata(mdev, card);

        /*
         * We want to trap meminfo, not getcond, so set the interval to
         * zero but rely on the maple bus driver to pass back the results
         * of the meminfo request
         */
        maple_getcond_callback(mdev, vmu_queryblocks, 0,
                MAPLE_FUNC_MEMCARD);

        /* Make sure we are clear to go */
        if (atomic_read(&mdev->busy) == 1) {
                wait_event_interruptible_timeout(mdev->maple_wait,
                        atomic_read(&mdev->busy) == 0, HZ);
                if (atomic_read(&mdev->busy) == 1) {
                        dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
                                mdev->port, mdev->unit);
                        error = -EAGAIN;
                        goto fail_device_busy;
                }
        }

        atomic_set(&mdev->busy, 1);

        /*
         * Set up the minfo call: vmu_queryblocks will handle
         * the information passed back
         */
        error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
                MAPLE_COMMAND_GETMINFO, 2, &partnum);
        if (error) {
                dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)"
                        " error is 0x%X\n", mdev->port, mdev->unit, error);
                /* the request was never queued, so drop the busy flag
                 * and unwind everything we allocated */
                atomic_set(&mdev->busy, 0);
                goto fail_device_busy;
        }
        return 0;

fail_device_busy:
        kfree(card->mtd);
fail_mtd_info:
        kfree(card->parts);
fail_partitions:
        kfree(card);
fail_nomem:
        return error;
}

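/* Remove the MTD devices and release driver state when the VMU goes away */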
static void __devexit vmu_disconnect(struct maple_device *mdev)
{
        struct memcard *card;
        struct mdev_part *mpart;
        int x;

        mdev->callback = NULL;
        card = maple_get_drvdata(mdev);
        for (x = 0; x < card->partitions; x++) {
                mpart = ((card->mtd)[x]).priv;
                mpart->mdev = NULL;
                del_mtd_device(&((card->mtd)[x]));
                kfree(((card->parts)[x]).name);
        }
        kfree(card->parts);
        kfree(card->mtd);
        kfree(card);
}

/* Callback to handle eccentricities of both mtd subsystem
 * and general flakiness of Dreamcast VMUs
 */
static int vmu_can_unload(struct maple_device *mdev)
{
        struct memcard *card;
        int x;
        struct mtd_info *mtd;

        card = maple_get_drvdata(mdev);
        for (x = 0; x < card->partitions; x++) {
                mtd = &((card->mtd)[x]);
                if (mtd->usecount > 0)
                        return 0;
        }
        return 1;
}

#define ERRSTR "VMU at (%d, %d) file error -"

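/* Log a human-readable message for each file error code the VMU reports
 * back over the maple bus */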
static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
{
        enum maple_file_errors error = ((int *)recvbuf)[1];

        switch (error) {

        case MAPLE_FILEERR_INVALID_PARTITION:
                dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
                        mdev->port, mdev->unit);
                break;

        case MAPLE_FILEERR_PHASE_ERROR:
                dev_notice(&mdev->dev, ERRSTR " phase error\n",
                        mdev->port, mdev->unit);
                break;

        case MAPLE_FILEERR_INVALID_BLOCK:
                dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
                        mdev->port, mdev->unit);
                break;

        case MAPLE_FILEERR_WRITE_ERROR:
                dev_notice(&mdev->dev, ERRSTR " write error\n",
                        mdev->port, mdev->unit);
                break;

        case MAPLE_FILEERR_INVALID_WRITE_LENGTH:
                dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
                        mdev->port, mdev->unit);
                break;

        case MAPLE_FILEERR_BAD_CRC:
                dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
                        mdev->port, mdev->unit);
                break;

        default:
                dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
                        mdev->port, mdev->unit, error);
        }
}

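/* Bind the driver to a memory card function detected on the maple bus */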
static int __devinit probe_maple_vmu(struct device *dev)
{
        int error;
        struct maple_device *mdev = to_maple_dev(dev);
        struct maple_driver *mdrv = to_maple_driver(dev->driver);

        mdev->can_unload = vmu_can_unload;
        mdev->fileerr_handler = vmu_file_error;
        mdev->driver = mdrv;

        error = vmu_connect(mdev);
        if (error)
                return error;

        return 0;
}

static int __devexit remove_maple_vmu(struct device *dev)
{
        struct maple_device *mdev = to_maple_dev(dev);

        vmu_disconnect(mdev);
        return 0;
}

static struct maple_driver vmu_flash_driver = {
        .function =     MAPLE_FUNC_MEMCARD,
        .drv = {
                .name =         "Dreamcast_visual_memory",
                .probe =        probe_maple_vmu,
                .remove =       __devexit_p(remove_maple_vmu),
        },
};

static int __init vmu_flash_map_init(void)
{
        return maple_driver_register(&vmu_flash_driver);
}

static void __exit vmu_flash_map_exit(void)
{
        maple_driver_unregister(&vmu_flash_driver);
}

module_init(vmu_flash_map_init);
module_exit(vmu_flash_map_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian McMenamin");
MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");