ide: use ide_build_sglist() and ide_destroy_dmatable() in non-PCI host drivers

drivers/ide/mips/au1xxx-ide.c (linux-block.git)
/*
 * linux/drivers/ide/mips/au1xxx-ide.c  version 01.30.00  Aug. 02 2005
 *
 * BRIEF MODULE DESCRIPTION
 * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
 *
 * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any later
 * version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Note: for more information, please refer to the "AMD Alchemy Au1200/Au1550
 * IDE Interface and Linux Device Driver" Application Note.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/platform_device.h>

#include <linux/init.h>
#include <linux/ide.h>
#include <linux/sysdev.h>

#include <linux/dma-mapping.h>

#include "ide-timing.h"

#include <asm/io.h>
#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>

#include <asm/mach-au1x00/au1xxx_ide.h>

#define DRV_NAME        "au1200-ide"
#define DRV_VERSION     "1.0"
#define DRV_AUTHOR      "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"

/* enable the burstmode in the dbdma */
#define IDE_AU1XXX_BURSTMODE    1

static _auide_hwif auide_hwif;
static int dbdma_init_done;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)

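/*
 * When PIO-over-DbDMA offload is configured, these two helpers replace the
 * generic insw/outsw port I/O routines: the sector data is queued on a
 * descriptor-based DMA (DbDMA) channel and the CPU busy-waits until the
 * current descriptor's valid bit (DSCR_CMD0_V) clears before advancing the
 * ring pointer.
 */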
void auide_insw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if (!put_dest_flags(ahwif->rx_chan, (void *)addr, count << 1,
                            DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->rx_chan);
        dp = ctp->cur_ptr;
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

void auide_outsw(unsigned long port, void *addr, u32 count)
{
        _auide_hwif *ahwif = &auide_hwif;
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;

        if (!put_source_flags(ahwif->tx_chan, (void *)addr,
                              count << 1, DDMA_FLAGS_NOIE)) {
                printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->tx_chan);
        dp = ctp->cur_ptr;
        while (dp->dscr_cmd0 & DSCR_CMD0_V)
                ;
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}

#endif

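/*
 * PIO and MWDMA timings are not programmed into the drive here; they are
 * applied to the Au1xxx static bus controller the interface hangs off
 * (MEM_STTIME2/MEM_STCFG2 configure the RCS2# chip select used for ATA).
 */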
static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
        int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);

        /* set pio mode! */
        switch (pio) {
        case 0:
                mem_sttime = SBC_IDE_TIMING(PIO0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
                break;

        case 1:
                mem_sttime = SBC_IDE_TIMING(PIO1);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
                break;

        case 2:
                mem_sttime = SBC_IDE_TIMING(PIO2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
                break;

        case 3:
                mem_sttime = SBC_IDE_TIMING(PIO3);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
                break;

        case 4:
                mem_sttime = SBC_IDE_TIMING(PIO4);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
                break;
        }

        au_writel(mem_sttime, MEM_STTIME2);
        au_writel(mem_stcfg, MEM_STCFG2);
}

static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
        int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);

        switch (speed) {
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        case XFER_MW_DMA_2:
                mem_sttime = SBC_IDE_TIMING(MDMA2);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;

                break;
        case XFER_MW_DMA_1:
                mem_sttime = SBC_IDE_TIMING(MDMA1);

                /* set configuration for RCS2# */
                mem_stcfg &= ~TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;

                break;
        case XFER_MW_DMA_0:
                mem_sttime = SBC_IDE_TIMING(MDMA0);

                /* set configuration for RCS2# */
                mem_stcfg |= TS_MASK;
                mem_stcfg &= ~TCSOE_MASK;
                mem_stcfg &= ~TOECS_MASK;
                mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;

                break;
#endif
        }

        au_writel(mem_sttime, MEM_STTIME2);
        au_writel(mem_stcfg, MEM_STCFG2);
}

/*
 * Multi-Word DMA + DbDMA functions
 */

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA

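/*
 * Translate the current request's scatterlist (built by ide_build_sglist())
 * into DbDMA descriptors.  Each descriptor moves at most 0xfe00 bytes and
 * only the last one raises an interrupt (DDMA_FLAGS_IE).  Returns 1 when at
 * least one descriptor was queued, 0 to make the caller fall back to PIO.
 */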
static int auide_build_dmatable(ide_drive_t *drive)
{
        int i, iswrite, count = 0;
        ide_hwif_t *hwif = HWIF(drive);

        struct request *rq = HWGROUP(drive)->rq;

        _auide_hwif *ahwif = (_auide_hwif *)hwif->hwif_data;
        struct scatterlist *sg;

        iswrite = (rq_data_dir(rq) == WRITE);
        /* Save for interrupt context */
        ahwif->drive = drive;

        hwif->sg_nents = i = ide_build_sglist(drive, rq);

        if (!i)
                return 0;

        /* fill the descriptors */
        sg = hwif->sg_table;
        while (i && sg_dma_len(sg)) {
                u32 cur_addr;
                u32 cur_len;

                cur_addr = sg_dma_address(sg);
                cur_len = sg_dma_len(sg);

                while (cur_len) {
                        u32 flags = DDMA_FLAGS_NOIE;
                        unsigned int tc = (cur_len < 0xfe00) ? cur_len : 0xfe00;

                        if (++count >= PRD_ENTRIES) {
                                printk(KERN_WARNING "%s: DMA table too small\n",
                                       drive->name);
                                goto use_pio_instead;
                        }

                        /* Lets enable intr for the last descriptor only */
                        if (i == 1)
                                flags = DDMA_FLAGS_IE;
                        else
                                flags = DDMA_FLAGS_NOIE;

                        if (iswrite) {
                                if (!put_source_flags(ahwif->tx_chan,
                                                      (void *)sg_virt(sg),
                                                      tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __FUNCTION__, __LINE__);
                                }
                        } else {
                                if (!put_dest_flags(ahwif->rx_chan,
                                                    (void *)sg_virt(sg),
                                                    tc, flags)) {
                                        printk(KERN_ERR "%s failed %d\n",
                                               __FUNCTION__, __LINE__);
                                }
                        }

                        cur_addr += tc;
                        cur_len -= tc;
                }
                sg = sg_next(sg);
                i--;
        }

        if (count)
                return 1;

 use_pio_instead:
        ide_destroy_dmatable(drive);

        return 0; /* revert to PIO for this request */
}

static int auide_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        if (hwif->sg_nents) {
                ide_destroy_dmatable(drive);
                hwif->sg_nents = 0;
        }

        return 0;
}

static void auide_dma_start(ide_drive_t *drive)
{
}

static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
        /* issue cmd to drive */
        ide_execute_command(drive, command, &ide_dma_intr,
                            (2 * WAIT_CMD), NULL);
}

static int auide_dma_setup(ide_drive_t *drive)
{
        struct request *rq = HWGROUP(drive)->rq;

        if (!auide_build_dmatable(drive)) {
                ide_map_sg(drive, rq);
                return 1;
        }

        drive->waiting_for_dma = 1;
        return 0;
}

static u8 auide_mdma_filter(ide_drive_t *drive)
{
        /*
         * FIXME: ->white_list and ->black_list are based on a completely bogus
         * ->ide_dma_check implementation which set neither the host controller
         * timings nor the device up for the desired transfer mode.
         *
         * They should either be removed or a 0x00 MWDMA mask should be
         * returned for devices on the ->black_list.
         */

        if (dbdma_init_done == 0) {
                auide_hwif.white_list = ide_in_drive_list(drive->id,
                                                          dma_white_list);
                auide_hwif.black_list = ide_in_drive_list(drive->id,
                                                          dma_black_list);
                auide_hwif.drive = drive;
                auide_ddma_init(&auide_hwif);
                dbdma_init_done = 1;
        }

        /* Is the drive in our DMA black list? */
        if (auide_hwif.black_list)
                printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n",
                       drive->name, drive->id->model);

        return drive->hwif->mwdma_mask;
}

static int auide_dma_test_irq(ide_drive_t *drive)
{
        if (drive->waiting_for_dma == 0)
                printk(KERN_WARNING "%s: ide_dma_test_irq called while not waiting\n",
                       drive->name);

        /* If dbdma didn't execute the STOP command yet, the
         * active bit is still set
         */
        drive->waiting_for_dma++;
        if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
                printk(KERN_WARNING "%s: timeout waiting for ddma to complete\n",
                       drive->name);
                return 1;
        }
        udelay(10);
        return 0;
}

static void auide_dma_host_set(ide_drive_t *drive, int on)
{
}

static void auide_dma_lost_irq(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: IRQ lost\n", drive->name);
}

static void auide_ddma_tx_callback(int irq, void *param)
{
        _auide_hwif *ahwif = (_auide_hwif *)param;

        ahwif->drive->waiting_for_dma = 0;
}

static void auide_ddma_rx_callback(int irq, void *param)
{
        _auide_hwif *ahwif = (_auide_hwif *)param;

        ahwif->drive->waiting_for_dma = 0;
}

#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */

static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize,
                                 u32 devwidth, u32 flags)
{
        dev->dev_id = dev_id;
        dev->dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
        dev->dev_intlevel = 0;
        dev->dev_intpolarity = 0;
        dev->dev_tsize = tsize;
        dev->dev_devwidth = devwidth;
        dev->dev_flags = flags;
}

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)

static void auide_dma_timeout(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

        if (hwif->ide_dma_test_irq(drive))
                return;

        hwif->ide_dma_end(drive);
}

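/*
 * Set up the DbDMA devices, channels and descriptor rings used for MWDMA:
 * a TX channel toward the ATA request device, an RX channel from it, and an
 * always-ready "target" device acting as the memory-side endpoint.  Burst
 * size and device width are widened (8 x 32 bit) only for drives found on
 * the white/black lists; otherwise the safe 1 x 16 bit setting is used.
 */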
static int auide_ddma_init(_auide_hwif *auide)
{
        dbdev_tab_t source_dev_tab, target_dev_tab;
        u32 dev_id, tsize, devwidth, flags;
        ide_hwif_t *hwif = auide->hwif;

        dev_id = AU1XXX_ATA_DDMA_REQ;

        if (auide->white_list || auide->black_list) {
                tsize = 8;
                devwidth = 32;
        } else {
                tsize = 1;
                devwidth = 16;

                printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",
                       auide_hwif.drive->id->model);
                printk(KERN_ERR "            please read 'Documentation/mips/AU1xxx_IDE.README'\n");
        }

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev(&source_dev_tab,
                             dev_id,
                             tsize, devwidth, DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        auide_init_dbdma_dev(&source_dev_tab,
                             dev_id,
                             tsize, devwidth, DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        /* We also need to add a target device for the DMA */
        auide_init_dbdma_dev(&target_dev_tab,
                             (u32)DSCR_CMD0_ALWAYS,
                             tsize, devwidth, DEV_FLAGS_ANYUSE);
        auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
                                                 auide->tx_dev_id,
                                                 auide_ddma_tx_callback,
                                                 (void *)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 auide->target_dev_id,
                                                 auide_ddma_rx_callback,
                                                 (void *)auide);

        auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                              NUM_DESCRIPTORS);
        auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                              NUM_DESCRIPTORS);

        hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev,
                                                PRD_ENTRIES * PRD_BYTES, /* 1 Page */
                                                &hwif->dmatable_dma, GFP_KERNEL);

        au1xxx_dbdma_start(auide->tx_chan);
        au1xxx_dbdma_start(auide->rx_chan);

        return 0;
}
#else

static int auide_ddma_init(_auide_hwif *auide)
{
        dbdev_tab_t source_dev_tab;
        int flags;

#ifdef IDE_AU1XXX_BURSTMODE
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
        flags = DEV_FLAGS_SYNC;
#endif

        /* setup dev_tab for tx channel */
        auide_init_dbdma_dev(&source_dev_tab,
                             (u32)DSCR_CMD0_ALWAYS,
                             8, 32, DEV_FLAGS_OUT | flags);
        auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        auide_init_dbdma_dev(&source_dev_tab,
                             (u32)DSCR_CMD0_ALWAYS,
                             8, 32, DEV_FLAGS_IN | flags);
        auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

        /* Get a channel for TX */
        auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
                                                 auide->tx_dev_id,
                                                 NULL,
                                                 (void *)auide);

        /* Get a channel for RX */
        auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
                                                 DSCR_CMD0_ALWAYS,
                                                 NULL,
                                                 (void *)auide);

        auide->tx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->tx_chan,
                                                              NUM_DESCRIPTORS);
        auide->rx_desc_head = (void *)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                              NUM_DESCRIPTORS);

        au1xxx_dbdma_start(auide->tx_chan);
        au1xxx_dbdma_start(auide->rx_chan);

        return 0;
}
#endif

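/*
 * The ATA task-file registers live on the static bus at regbase, spaced
 * (1 << AU1XXX_ATA_REG_OFFSET) bytes apart; the alternate status/device
 * control register sits at register offset 14.
 */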
static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
{
        int i;
        unsigned long *ata_regs = hw->io_ports;

        /* FIXME? */
        for (i = 0; i < IDE_CONTROL_OFFSET; i++)
                *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);

        /* set the Alternative Status register */
        *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
}

static int au_ide_probe(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        _auide_hwif *ahwif = &auide_hwif;
        ide_hwif_t *hwif;
        struct resource *res;
        int ret = 0;
        u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
        hw_regs_t hw;

#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
        char *mode = "MWDMA2";
#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
        char *mode = "PIO+DDMA(offload)";
#endif

        memset(&auide_hwif, 0, sizeof(_auide_hwif));
        ahwif->irq = platform_get_irq(pdev, 0);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        if (res == NULL) {
                pr_debug("%s %d: no base address\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }
        if (ahwif->irq < 0) {
                pr_debug("%s %d: no IRQ\n", DRV_NAME, pdev->id);
                ret = -ENODEV;
                goto out;
        }

        if (!request_mem_region(res->start, res->end - res->start, pdev->name)) {
                pr_debug("%s: request_mem_region failed\n", DRV_NAME);
                ret = -EBUSY;
                goto out;
        }

        ahwif->regbase = (u32)ioremap(res->start, res->end - res->start);
        if (ahwif->regbase == 0) {
                ret = -ENOMEM;
                goto out;
        }

        /* FIXME: This might possibly break PCMCIA IDE devices */

        hwif = &ide_hwifs[pdev->id];

        memset(&hw, 0, sizeof(hw));
        auide_setup_ports(&hw, ahwif);
        hw.irq = ahwif->irq;
        hw.chipset = ide_au1xxx;

        ide_init_port_hw(hwif, &hw);

        hwif->dev = dev;

        hwif->ultra_mask = 0x0;         /* Disable Ultra DMA */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        hwif->mwdma_mask = 0x07;        /* Multimode-2 DMA */
        hwif->swdma_mask = 0x00;
#else
        hwif->mwdma_mask = 0x0;
        hwif->swdma_mask = 0x0;
#endif

        hwif->pio_mask = ATA_PIO4;
        hwif->host_flags = IDE_HFLAG_POST_SET_MODE;

        hwif->drives[0].unmask = 1;
        hwif->drives[1].unmask = 1;

        /* hold should be on in all cases */
        hwif->hold = 1;

        hwif->mmio = 1;

        /* If the user has selected DDMA assisted copies,
         * then set up a few local I/O function entry points
         */
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
        hwif->INSW = auide_insw;
        hwif->OUTSW = auide_outsw;
#endif

        hwif->set_pio_mode = &au1xxx_set_pio_mode;
        hwif->set_dma_mode = &auide_set_dma_mode;

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        hwif->dma_timeout = &auide_dma_timeout;

        hwif->mdma_filter = &auide_mdma_filter;

        hwif->dma_host_set = &auide_dma_host_set;
        hwif->dma_exec_cmd = &auide_dma_exec_cmd;
        hwif->dma_start = &auide_dma_start;
        hwif->ide_dma_end = &auide_dma_end;
        hwif->dma_setup = &auide_dma_setup;
        hwif->ide_dma_test_irq = &auide_dma_test_irq;
        hwif->dma_lost_irq = &auide_dma_lost_irq;
#endif
        hwif->channel = 0;
        hwif->select_data = 0;          /* no chipset-specific code */
        hwif->config_data = 0;          /* no chipset-specific code */

        hwif->drives[0].autotune = 1;   /* 1=autotune, 2=noautotune, 0=default */
        hwif->drives[1].autotune = 1;

        hwif->drives[0].no_io_32bit = 1;
        hwif->drives[1].no_io_32bit = 1;

        auide_hwif.hwif = hwif;
        hwif->hwif_data = &auide_hwif;

#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
        auide_ddma_init(&auide_hwif);
        dbdma_init_done = 1;
#endif

        idx[0] = hwif->index;

        ide_device_add(idx);

        dev_set_drvdata(dev, hwif);

        printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode);

 out:
        return ret;
}

static int au_ide_remove(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct resource *res;
        ide_hwif_t *hwif = dev_get_drvdata(dev);
        _auide_hwif *ahwif = &auide_hwif;

        ide_unregister(hwif - ide_hwifs);

        iounmap((void *)ahwif->regbase);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, res->end - res->start);

        return 0;
}

static struct device_driver au1200_ide_driver = {
        .name   = "au1200-ide",
        .bus    = &platform_bus_type,
        .probe  = au_ide_probe,
        .remove = au_ide_remove,
};

static int __init au_ide_init(void)
{
        return driver_register(&au1200_ide_driver);
}

static void __exit au_ide_exit(void)
{
        driver_unregister(&au1200_ide_driver);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AU1200 IDE driver");

module_init(au_ide_init);
module_exit(au_ide_exit);