Commit | Line | Data |
---|---|---|
edea3ab5 ML |
1 | /* |
2 | * pdc_adma.c - Pacific Digital Corporation ADMA | |
3 | * | |
4 | * Maintained by: Mark Lord <mlord@pobox.com> | |
5 | * | |
6 | * Copyright 2005 Mark Lord | |
7 | * | |
68399bb5 JG |
8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | |
10 | * the Free Software Foundation; either version 2, or (at your option) | |
11 | * any later version. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; see the file COPYING. If not, write to | |
20 | * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. | |
21 | * | |
22 | * | |
23 | * libata documentation is available via 'make {ps|pdf}docs', | |
24 | * as Documentation/DocBook/libata.* | |
edea3ab5 | 25 | * |
edea3ab5 ML |
26 | * |
27 | * Supports ATA disks in single-packet ADMA mode. | |
28 | * Uses PIO for everything else. | |
29 | * | |
30 | * TODO: Use ADMA transfers for ATAPI devices, when possible. | |
31 | * This requires careful attention to a number of quirks of the chip. | |
32 | * | |
33 | */ | |
34 | ||
35 | #include <linux/kernel.h> | |
36 | #include <linux/module.h> | |
37 | #include <linux/pci.h> | |
38 | #include <linux/init.h> | |
39 | #include <linux/blkdev.h> | |
40 | #include <linux/delay.h> | |
41 | #include <linux/interrupt.h> | |
42 | #include <linux/sched.h> | |
a9524a76 | 43 | #include <linux/device.h> |
edea3ab5 ML |
44 | #include <scsi/scsi_host.h> |
45 | #include <asm/io.h> | |
46 | #include <linux/libata.h> | |
47 | ||
48 | #define DRV_NAME "pdc_adma" | |
af64371a | 49 | #define DRV_VERSION "0.04" |
edea3ab5 ML |
50 | |
51 | /* macro to calculate base address for ATA regs */ | |
52 | #define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40)) | |
53 | ||
54 | /* macro to calculate base address for ADMA regs */ | |
55 | #define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20)) | |
56 | ||
57 | enum { | |
58 | ADMA_PORTS = 2, | |
59 | ADMA_CPB_BYTES = 40, | |
60 | ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16, | |
61 | ADMA_PKT_BYTES = ADMA_CPB_BYTES + ADMA_PRD_BYTES, | |
62 | ||
63 | ADMA_DMA_BOUNDARY = 0xffffffff, | |
64 | ||
65 | /* global register offsets */ | |
66 | ADMA_MODE_LOCK = 0x00c7, | |
67 | ||
68 | /* per-channel register offsets */ | |
69 | ADMA_CONTROL = 0x0000, /* ADMA control */ | |
70 | ADMA_STATUS = 0x0002, /* ADMA status */ | |
71 | ADMA_CPB_COUNT = 0x0004, /* CPB count */ | |
72 | ADMA_CPB_CURRENT = 0x000c, /* current CPB address */ | |
73 | ADMA_CPB_NEXT = 0x000c, /* next CPB address */ | |
74 | ADMA_CPB_LOOKUP = 0x0010, /* CPB lookup table */ | |
75 | ADMA_FIFO_IN = 0x0014, /* input FIFO threshold */ | |
76 | ADMA_FIFO_OUT = 0x0016, /* output FIFO threshold */ | |
77 | ||
78 | /* ADMA_CONTROL register bits */ | |
79 | aNIEN = (1 << 8), /* irq mask: 1==masked */ | |
80 | aGO = (1 << 7), /* packet trigger ("Go!") */ | |
81 | aRSTADM = (1 << 5), /* ADMA logic reset */ | |
edea3ab5 ML |
82 | aPIOMD4 = 0x0003, /* PIO mode 4 */ |
83 | ||
84 | /* ADMA_STATUS register bits */ | |
85 | aPSD = (1 << 6), | |
86 | aUIRQ = (1 << 4), | |
87 | aPERR = (1 << 0), | |
88 | ||
89 | /* CPB bits */ | |
90 | cDONE = (1 << 0), | |
91 | cVLD = (1 << 0), | |
92 | cDAT = (1 << 2), | |
93 | cIEN = (1 << 3), | |
94 | ||
95 | /* PRD bits */ | |
96 | pORD = (1 << 4), | |
97 | pDIRO = (1 << 5), | |
98 | pEND = (1 << 7), | |
99 | ||
100 | /* ATA register flags */ | |
101 | rIGN = (1 << 5), | |
102 | rEND = (1 << 7), | |
103 | ||
104 | /* ATA register addresses */ | |
105 | ADMA_REGS_CONTROL = 0x0e, | |
106 | ADMA_REGS_SECTOR_COUNT = 0x12, | |
107 | ADMA_REGS_LBA_LOW = 0x13, | |
108 | ADMA_REGS_LBA_MID = 0x14, | |
109 | ADMA_REGS_LBA_HIGH = 0x15, | |
110 | ADMA_REGS_DEVICE = 0x16, | |
111 | ADMA_REGS_COMMAND = 0x17, | |
112 | ||
113 | /* PCI device IDs */ | |
114 | board_1841_idx = 0, /* ADMA 2-port controller */ | |
115 | }; | |
116 | ||
117 | typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t; | |
118 | ||
119 | struct adma_port_priv { | |
120 | u8 *pkt; | |
121 | dma_addr_t pkt_dma; | |
122 | adma_state_t state; | |
123 | }; | |
124 | ||
125 | static int adma_ata_init_one (struct pci_dev *pdev, | |
126 | const struct pci_device_id *ent); | |
127 | static irqreturn_t adma_intr (int irq, void *dev_instance, | |
128 | struct pt_regs *regs); | |
129 | static int adma_port_start(struct ata_port *ap); | |
130 | static void adma_host_stop(struct ata_host_set *host_set); | |
131 | static void adma_port_stop(struct ata_port *ap); | |
132 | static void adma_phy_reset(struct ata_port *ap); | |
133 | static void adma_qc_prep(struct ata_queued_cmd *qc); | |
9a3d9eb0 | 134 | static unsigned int adma_qc_issue(struct ata_queued_cmd *qc); |
edea3ab5 ML |
135 | static int adma_check_atapi_dma(struct ata_queued_cmd *qc); |
136 | static void adma_bmdma_stop(struct ata_queued_cmd *qc); | |
137 | static u8 adma_bmdma_status(struct ata_port *ap); | |
138 | static void adma_irq_clear(struct ata_port *ap); | |
139 | static void adma_eng_timeout(struct ata_port *ap); | |
140 | ||
193515d5 | 141 | static struct scsi_host_template adma_ata_sht = { |
edea3ab5 ML |
142 | .module = THIS_MODULE, |
143 | .name = DRV_NAME, | |
144 | .ioctl = ata_scsi_ioctl, | |
145 | .queuecommand = ata_scsi_queuecmd, | |
edea3ab5 ML |
146 | .can_queue = ATA_DEF_QUEUE, |
147 | .this_id = ATA_SHT_THIS_ID, | |
148 | .sg_tablesize = LIBATA_MAX_PRD, | |
edea3ab5 ML |
149 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, |
150 | .emulated = ATA_SHT_EMULATED, | |
151 | .use_clustering = ENABLE_CLUSTERING, | |
152 | .proc_name = DRV_NAME, | |
153 | .dma_boundary = ADMA_DMA_BOUNDARY, | |
154 | .slave_configure = ata_scsi_slave_config, | |
ccf68c34 | 155 | .slave_destroy = ata_scsi_slave_destroy, |
edea3ab5 ML |
156 | .bios_param = ata_std_bios_param, |
157 | }; | |
158 | ||
057ace5e | 159 | static const struct ata_port_operations adma_ata_ops = { |
edea3ab5 ML |
160 | .port_disable = ata_port_disable, |
161 | .tf_load = ata_tf_load, | |
162 | .tf_read = ata_tf_read, | |
163 | .check_status = ata_check_status, | |
164 | .check_atapi_dma = adma_check_atapi_dma, | |
165 | .exec_command = ata_exec_command, | |
166 | .dev_select = ata_std_dev_select, | |
167 | .phy_reset = adma_phy_reset, | |
168 | .qc_prep = adma_qc_prep, | |
169 | .qc_issue = adma_qc_issue, | |
170 | .eng_timeout = adma_eng_timeout, | |
1049cb47 | 171 | .data_xfer = ata_mmio_data_xfer, |
edea3ab5 ML |
172 | .irq_handler = adma_intr, |
173 | .irq_clear = adma_irq_clear, | |
174 | .port_start = adma_port_start, | |
175 | .port_stop = adma_port_stop, | |
176 | .host_stop = adma_host_stop, | |
177 | .bmdma_stop = adma_bmdma_stop, | |
178 | .bmdma_status = adma_bmdma_status, | |
179 | }; | |
180 | ||
181 | static struct ata_port_info adma_port_info[] = { | |
182 | /* board_1841_idx */ | |
183 | { | |
184 | .sht = &adma_ata_sht, | |
185 | .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | | |
186 | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO, | |
187 | .pio_mask = 0x10, /* pio4 */ | |
188 | .udma_mask = 0x1f, /* udma0-4 */ | |
189 | .port_ops = &adma_ata_ops, | |
190 | }, | |
191 | }; | |
192 | ||
3b7d697d | 193 | static const struct pci_device_id adma_ata_pci_tbl[] = { |
edea3ab5 ML |
194 | { PCI_VENDOR_ID_PDC, 0x1841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
195 | board_1841_idx }, | |
196 | ||
197 | { } /* terminate list */ | |
198 | }; | |
199 | ||
200 | static struct pci_driver adma_ata_pci_driver = { | |
201 | .name = DRV_NAME, | |
202 | .id_table = adma_ata_pci_tbl, | |
203 | .probe = adma_ata_init_one, | |
204 | .remove = ata_pci_remove_one, | |
205 | }; | |
206 | ||
207 | static int adma_check_atapi_dma(struct ata_queued_cmd *qc) | |
208 | { | |
209 | return 1; /* ATAPI DMA not yet supported */ | |
210 | } | |
211 | ||
212 | static void adma_bmdma_stop(struct ata_queued_cmd *qc) | |
213 | { | |
214 | /* nothing */ | |
215 | } | |
216 | ||
217 | static u8 adma_bmdma_status(struct ata_port *ap) | |
218 | { | |
219 | return 0; | |
220 | } | |
221 | ||
222 | static void adma_irq_clear(struct ata_port *ap) | |
223 | { | |
224 | /* nothing */ | |
225 | } | |
226 | ||
227 | static void adma_reset_engine(void __iomem *chan) | |
228 | { | |
229 | /* reset ADMA to idle state */ | |
230 | writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL); | |
231 | udelay(2); | |
232 | writew(aPIOMD4, chan + ADMA_CONTROL); | |
233 | udelay(2); | |
234 | } | |
235 | ||
236 | static void adma_reinit_engine(struct ata_port *ap) | |
237 | { | |
238 | struct adma_port_priv *pp = ap->private_data; | |
239 | void __iomem *mmio_base = ap->host_set->mmio_base; | |
240 | void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no); | |
241 | ||
242 | /* mask/clear ATA interrupts */ | |
243 | writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr); | |
244 | ata_check_status(ap); | |
245 | ||
246 | /* reset the ADMA engine */ | |
247 | adma_reset_engine(chan); | |
248 | ||
249 | /* set in-FIFO threshold to 0x100 */ | |
250 | writew(0x100, chan + ADMA_FIFO_IN); | |
251 | ||
252 | /* set CPB pointer */ | |
253 | writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT); | |
254 | ||
255 | /* set out-FIFO threshold to 0x100 */ | |
256 | writew(0x100, chan + ADMA_FIFO_OUT); | |
257 | ||
258 | /* set CPB count */ | |
259 | writew(1, chan + ADMA_CPB_COUNT); | |
260 | ||
261 | /* read/discard ADMA status */ | |
262 | readb(chan + ADMA_STATUS); | |
263 | } | |
264 | ||
265 | static inline void adma_enter_reg_mode(struct ata_port *ap) | |
266 | { | |
267 | void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no); | |
268 | ||
269 | writew(aPIOMD4, chan + ADMA_CONTROL); | |
270 | readb(chan + ADMA_STATUS); /* flush */ | |
271 | } | |
272 | ||
273 | static void adma_phy_reset(struct ata_port *ap) | |
274 | { | |
275 | struct adma_port_priv *pp = ap->private_data; | |
276 | ||
277 | pp->state = adma_state_idle; | |
278 | adma_reinit_engine(ap); | |
279 | ata_port_probe(ap); | |
280 | ata_bus_reset(ap); | |
281 | } | |
282 | ||
283 | static void adma_eng_timeout(struct ata_port *ap) | |
284 | { | |
285 | struct adma_port_priv *pp = ap->private_data; | |
286 | ||
287 | if (pp->state != adma_state_idle) /* healthy paranoia */ | |
288 | pp->state = adma_state_mmio; | |
289 | adma_reinit_engine(ap); | |
290 | ata_eng_timeout(ap); | |
291 | } | |
292 | ||
293 | static int adma_fill_sg(struct ata_queued_cmd *qc) | |
294 | { | |
972c26bd | 295 | struct scatterlist *sg; |
edea3ab5 ML |
296 | struct ata_port *ap = qc->ap; |
297 | struct adma_port_priv *pp = ap->private_data; | |
298 | u8 *buf = pp->pkt; | |
972c26bd | 299 | int i = (2 + buf[3]) * 8; |
edea3ab5 ML |
300 | u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0); |
301 | ||
972c26bd | 302 | ata_for_each_sg(sg, qc) { |
edea3ab5 ML |
303 | u32 addr; |
304 | u32 len; | |
305 | ||
306 | addr = (u32)sg_dma_address(sg); | |
307 | *(__le32 *)(buf + i) = cpu_to_le32(addr); | |
308 | i += 4; | |
309 | ||
310 | len = sg_dma_len(sg) >> 3; | |
311 | *(__le32 *)(buf + i) = cpu_to_le32(len); | |
312 | i += 4; | |
313 | ||
972c26bd | 314 | if (ata_sg_is_last(sg, qc)) |
edea3ab5 ML |
315 | pFLAGS |= pEND; |
316 | buf[i++] = pFLAGS; | |
317 | buf[i++] = qc->dev->dma_mode & 0xf; | |
318 | buf[i++] = 0; /* pPKLW */ | |
319 | buf[i++] = 0; /* reserved */ | |
320 | ||
321 | *(__le32 *)(buf + i) | |
322 | = (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4); | |
323 | i += 4; | |
324 | ||
db7f44d9 | 325 | VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4, |
edea3ab5 ML |
326 | (unsigned long)addr, len); |
327 | } | |
328 | return i; | |
329 | } | |
330 | ||
331 | static void adma_qc_prep(struct ata_queued_cmd *qc) | |
332 | { | |
333 | struct adma_port_priv *pp = qc->ap->private_data; | |
334 | u8 *buf = pp->pkt; | |
335 | u32 pkt_dma = (u32)pp->pkt_dma; | |
336 | int i = 0; | |
337 | ||
338 | VPRINTK("ENTER\n"); | |
339 | ||
340 | adma_enter_reg_mode(qc->ap); | |
341 | if (qc->tf.protocol != ATA_PROT_DMA) { | |
342 | ata_qc_prep(qc); | |
343 | return; | |
344 | } | |
345 | ||
346 | buf[i++] = 0; /* Response flags */ | |
347 | buf[i++] = 0; /* reserved */ | |
348 | buf[i++] = cVLD | cDAT | cIEN; | |
349 | i++; /* cLEN, gets filled in below */ | |
350 | ||
351 | *(__le32 *)(buf+i) = cpu_to_le32(pkt_dma); /* cNCPB */ | |
352 | i += 4; /* cNCPB */ | |
353 | i += 4; /* cPRD, gets filled in below */ | |
354 | ||
355 | buf[i++] = 0; /* reserved */ | |
356 | buf[i++] = 0; /* reserved */ | |
357 | buf[i++] = 0; /* reserved */ | |
358 | buf[i++] = 0; /* reserved */ | |
359 | ||
360 | /* ATA registers; must be a multiple of 4 */ | |
361 | buf[i++] = qc->tf.device; | |
362 | buf[i++] = ADMA_REGS_DEVICE; | |
363 | if ((qc->tf.flags & ATA_TFLAG_LBA48)) { | |
364 | buf[i++] = qc->tf.hob_nsect; | |
365 | buf[i++] = ADMA_REGS_SECTOR_COUNT; | |
366 | buf[i++] = qc->tf.hob_lbal; | |
367 | buf[i++] = ADMA_REGS_LBA_LOW; | |
368 | buf[i++] = qc->tf.hob_lbam; | |
369 | buf[i++] = ADMA_REGS_LBA_MID; | |
370 | buf[i++] = qc->tf.hob_lbah; | |
371 | buf[i++] = ADMA_REGS_LBA_HIGH; | |
372 | } | |
373 | buf[i++] = qc->tf.nsect; | |
374 | buf[i++] = ADMA_REGS_SECTOR_COUNT; | |
375 | buf[i++] = qc->tf.lbal; | |
376 | buf[i++] = ADMA_REGS_LBA_LOW; | |
377 | buf[i++] = qc->tf.lbam; | |
378 | buf[i++] = ADMA_REGS_LBA_MID; | |
379 | buf[i++] = qc->tf.lbah; | |
380 | buf[i++] = ADMA_REGS_LBA_HIGH; | |
381 | buf[i++] = 0; | |
382 | buf[i++] = ADMA_REGS_CONTROL; | |
383 | buf[i++] = rIGN; | |
384 | buf[i++] = 0; | |
385 | buf[i++] = qc->tf.command; | |
386 | buf[i++] = ADMA_REGS_COMMAND | rEND; | |
387 | ||
388 | buf[3] = (i >> 3) - 2; /* cLEN */ | |
389 | *(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i); /* cPRD */ | |
390 | ||
391 | i = adma_fill_sg(qc); | |
392 | wmb(); /* flush PRDs and pkt to memory */ | |
393 | #if 0 | |
394 | /* dump out CPB + PRDs for debug */ | |
395 | { | |
396 | int j, len = 0; | |
397 | static char obuf[2048]; | |
398 | for (j = 0; j < i; ++j) { | |
399 | len += sprintf(obuf+len, "%02x ", buf[j]); | |
400 | if ((j & 7) == 7) { | |
401 | printk("%s\n", obuf); | |
402 | len = 0; | |
403 | } | |
404 | } | |
405 | if (len) | |
406 | printk("%s\n", obuf); | |
407 | } | |
408 | #endif | |
409 | } | |
410 | ||
411 | static inline void adma_packet_start(struct ata_queued_cmd *qc) | |
412 | { | |
413 | struct ata_port *ap = qc->ap; | |
414 | void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no); | |
415 | ||
416 | VPRINTK("ENTER, ap %p\n", ap); | |
417 | ||
418 | /* fire up the ADMA engine */ | |
68399bb5 | 419 | writew(aPIOMD4 | aGO, chan + ADMA_CONTROL); |
edea3ab5 ML |
420 | } |
421 | ||
9a3d9eb0 | 422 | static unsigned int adma_qc_issue(struct ata_queued_cmd *qc) |
edea3ab5 ML |
423 | { |
424 | struct adma_port_priv *pp = qc->ap->private_data; | |
425 | ||
426 | switch (qc->tf.protocol) { | |
427 | case ATA_PROT_DMA: | |
428 | pp->state = adma_state_pkt; | |
429 | adma_packet_start(qc); | |
430 | return 0; | |
431 | ||
432 | case ATA_PROT_ATAPI_DMA: | |
433 | BUG(); | |
434 | break; | |
435 | ||
436 | default: | |
437 | break; | |
438 | } | |
439 | ||
440 | pp->state = adma_state_mmio; | |
441 | return ata_qc_issue_prot(qc); | |
442 | } | |
443 | ||
444 | static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set) | |
445 | { | |
446 | unsigned int handled = 0, port_no; | |
447 | u8 __iomem *mmio_base = host_set->mmio_base; | |
448 | ||
449 | for (port_no = 0; port_no < host_set->n_ports; ++port_no) { | |
450 | struct ata_port *ap = host_set->ports[port_no]; | |
451 | struct adma_port_priv *pp; | |
452 | struct ata_queued_cmd *qc; | |
453 | void __iomem *chan = ADMA_REGS(mmio_base, port_no); | |
a7dac447 | 454 | u8 status = readb(chan + ADMA_STATUS); |
edea3ab5 ML |
455 | |
456 | if (status == 0) | |
457 | continue; | |
458 | handled = 1; | |
459 | adma_enter_reg_mode(ap); | |
029f5468 | 460 | if (ap->flags & ATA_FLAG_DISABLED) |
edea3ab5 ML |
461 | continue; |
462 | pp = ap->private_data; | |
463 | if (!pp || pp->state != adma_state_pkt) | |
464 | continue; | |
465 | qc = ata_qc_from_tag(ap, ap->active_tag); | |
94ec1ef1 | 466 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
a21a84a3 | 467 | if ((status & (aPERR | aPSD | aUIRQ))) |
a22e2eb0 | 468 | qc->err_mask |= AC_ERR_OTHER; |
a21a84a3 | 469 | else if (pp->pkt[0] != cDONE) |
a22e2eb0 | 470 | qc->err_mask |= AC_ERR_OTHER; |
a7dac447 | 471 | |
a22e2eb0 | 472 | ata_qc_complete(qc); |
a21a84a3 | 473 | } |
edea3ab5 ML |
474 | } |
475 | return handled; | |
476 | } | |
477 | ||
478 | static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set) | |
479 | { | |
480 | unsigned int handled = 0, port_no; | |
481 | ||
482 | for (port_no = 0; port_no < host_set->n_ports; ++port_no) { | |
483 | struct ata_port *ap; | |
484 | ap = host_set->ports[port_no]; | |
029f5468 | 485 | if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) { |
edea3ab5 ML |
486 | struct ata_queued_cmd *qc; |
487 | struct adma_port_priv *pp = ap->private_data; | |
488 | if (!pp || pp->state != adma_state_mmio) | |
489 | continue; | |
490 | qc = ata_qc_from_tag(ap, ap->active_tag); | |
be697c3f | 491 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
edea3ab5 ML |
492 | |
493 | /* check main status, clearing INTRQ */ | |
ac19bff2 | 494 | u8 status = ata_check_status(ap); |
edea3ab5 ML |
495 | if ((status & ATA_BUSY)) |
496 | continue; | |
497 | DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", | |
498 | ap->id, qc->tf.protocol, status); | |
499 | ||
500 | /* complete taskfile transaction */ | |
501 | pp->state = adma_state_idle; | |
a22e2eb0 AL |
502 | qc->err_mask |= ac_err_mask(status); |
503 | ata_qc_complete(qc); | |
edea3ab5 ML |
504 | handled = 1; |
505 | } | |
506 | } | |
507 | } | |
508 | return handled; | |
509 | } | |
510 | ||
511 | static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs) | |
512 | { | |
513 | struct ata_host_set *host_set = dev_instance; | |
514 | unsigned int handled = 0; | |
515 | ||
516 | VPRINTK("ENTER\n"); | |
517 | ||
518 | spin_lock(&host_set->lock); | |
519 | handled = adma_intr_pkt(host_set) | adma_intr_mmio(host_set); | |
520 | spin_unlock(&host_set->lock); | |
521 | ||
522 | VPRINTK("EXIT\n"); | |
523 | ||
524 | return IRQ_RETVAL(handled); | |
525 | } | |
526 | ||
527 | static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base) | |
528 | { | |
529 | port->cmd_addr = | |
530 | port->data_addr = base + 0x000; | |
531 | port->error_addr = | |
532 | port->feature_addr = base + 0x004; | |
533 | port->nsect_addr = base + 0x008; | |
534 | port->lbal_addr = base + 0x00c; | |
535 | port->lbam_addr = base + 0x010; | |
536 | port->lbah_addr = base + 0x014; | |
537 | port->device_addr = base + 0x018; | |
538 | port->status_addr = | |
539 | port->command_addr = base + 0x01c; | |
540 | port->altstatus_addr = | |
541 | port->ctl_addr = base + 0x038; | |
542 | } | |
543 | ||
544 | static int adma_port_start(struct ata_port *ap) | |
545 | { | |
546 | struct device *dev = ap->host_set->dev; | |
547 | struct adma_port_priv *pp; | |
548 | int rc; | |
549 | ||
550 | rc = ata_port_start(ap); | |
551 | if (rc) | |
552 | return rc; | |
553 | adma_enter_reg_mode(ap); | |
554 | rc = -ENOMEM; | |
555 | pp = kcalloc(1, sizeof(*pp), GFP_KERNEL); | |
556 | if (!pp) | |
557 | goto err_out; | |
558 | pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma, | |
559 | GFP_KERNEL); | |
560 | if (!pp->pkt) | |
561 | goto err_out_kfree; | |
562 | /* paranoia? */ | |
563 | if ((pp->pkt_dma & 7) != 0) { | |
564 | printk("bad alignment for pp->pkt_dma: %08x\n", | |
565 | (u32)pp->pkt_dma); | |
a21a84a3 JG |
566 | dma_free_coherent(dev, ADMA_PKT_BYTES, |
567 | pp->pkt, pp->pkt_dma); | |
568 | goto err_out_kfree; | |
edea3ab5 ML |
569 | } |
570 | memset(pp->pkt, 0, ADMA_PKT_BYTES); | |
571 | ap->private_data = pp; | |
572 | adma_reinit_engine(ap); | |
573 | return 0; | |
574 | ||
edea3ab5 ML |
575 | err_out_kfree: |
576 | kfree(pp); | |
577 | err_out: | |
578 | ata_port_stop(ap); | |
579 | return rc; | |
580 | } | |
581 | ||
582 | static void adma_port_stop(struct ata_port *ap) | |
583 | { | |
584 | struct device *dev = ap->host_set->dev; | |
585 | struct adma_port_priv *pp = ap->private_data; | |
586 | ||
587 | adma_reset_engine(ADMA_REGS(ap->host_set->mmio_base, ap->port_no)); | |
588 | if (pp != NULL) { | |
589 | ap->private_data = NULL; | |
590 | if (pp->pkt != NULL) | |
591 | dma_free_coherent(dev, ADMA_PKT_BYTES, | |
592 | pp->pkt, pp->pkt_dma); | |
593 | kfree(pp); | |
594 | } | |
595 | ata_port_stop(ap); | |
596 | } | |
597 | ||
598 | static void adma_host_stop(struct ata_host_set *host_set) | |
599 | { | |
600 | unsigned int port_no; | |
601 | ||
602 | for (port_no = 0; port_no < ADMA_PORTS; ++port_no) | |
603 | adma_reset_engine(ADMA_REGS(host_set->mmio_base, port_no)); | |
604 | ||
605 | ata_pci_host_stop(host_set); | |
606 | } | |
607 | ||
608 | static void adma_host_init(unsigned int chip_id, | |
609 | struct ata_probe_ent *probe_ent) | |
610 | { | |
611 | unsigned int port_no; | |
612 | void __iomem *mmio_base = probe_ent->mmio_base; | |
613 | ||
614 | /* enable/lock aGO operation */ | |
615 | writeb(7, mmio_base + ADMA_MODE_LOCK); | |
616 | ||
617 | /* reset the ADMA logic */ | |
618 | for (port_no = 0; port_no < ADMA_PORTS; ++port_no) | |
619 | adma_reset_engine(ADMA_REGS(mmio_base, port_no)); | |
620 | } | |
621 | ||
622 | static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base) | |
623 | { | |
624 | int rc; | |
625 | ||
626 | rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | |
627 | if (rc) { | |
a9524a76 JG |
628 | dev_printk(KERN_ERR, &pdev->dev, |
629 | "32-bit DMA enable failed\n"); | |
edea3ab5 ML |
630 | return rc; |
631 | } | |
632 | rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | |
633 | if (rc) { | |
a9524a76 JG |
634 | dev_printk(KERN_ERR, &pdev->dev, |
635 | "32-bit consistent DMA enable failed\n"); | |
edea3ab5 ML |
636 | return rc; |
637 | } | |
638 | return 0; | |
639 | } | |
640 | ||
641 | static int adma_ata_init_one(struct pci_dev *pdev, | |
642 | const struct pci_device_id *ent) | |
643 | { | |
644 | static int printed_version; | |
645 | struct ata_probe_ent *probe_ent = NULL; | |
646 | void __iomem *mmio_base; | |
647 | unsigned int board_idx = (unsigned int) ent->driver_data; | |
648 | int rc, port_no; | |
649 | ||
650 | if (!printed_version++) | |
a9524a76 | 651 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); |
edea3ab5 ML |
652 | |
653 | rc = pci_enable_device(pdev); | |
654 | if (rc) | |
655 | return rc; | |
656 | ||
657 | rc = pci_request_regions(pdev, DRV_NAME); | |
658 | if (rc) | |
659 | goto err_out; | |
660 | ||
661 | if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) { | |
662 | rc = -ENODEV; | |
663 | goto err_out_regions; | |
664 | } | |
665 | ||
666 | mmio_base = pci_iomap(pdev, 4, 0); | |
667 | if (mmio_base == NULL) { | |
668 | rc = -ENOMEM; | |
669 | goto err_out_regions; | |
670 | } | |
671 | ||
672 | rc = adma_set_dma_masks(pdev, mmio_base); | |
673 | if (rc) | |
674 | goto err_out_iounmap; | |
675 | ||
676 | probe_ent = kcalloc(1, sizeof(*probe_ent), GFP_KERNEL); | |
677 | if (probe_ent == NULL) { | |
678 | rc = -ENOMEM; | |
679 | goto err_out_iounmap; | |
680 | } | |
681 | ||
682 | probe_ent->dev = pci_dev_to_dev(pdev); | |
683 | INIT_LIST_HEAD(&probe_ent->node); | |
684 | ||
685 | probe_ent->sht = adma_port_info[board_idx].sht; | |
686 | probe_ent->host_flags = adma_port_info[board_idx].host_flags; | |
687 | probe_ent->pio_mask = adma_port_info[board_idx].pio_mask; | |
688 | probe_ent->mwdma_mask = adma_port_info[board_idx].mwdma_mask; | |
689 | probe_ent->udma_mask = adma_port_info[board_idx].udma_mask; | |
690 | probe_ent->port_ops = adma_port_info[board_idx].port_ops; | |
691 | ||
692 | probe_ent->irq = pdev->irq; | |
1d6f359a | 693 | probe_ent->irq_flags = IRQF_SHARED; |
edea3ab5 ML |
694 | probe_ent->mmio_base = mmio_base; |
695 | probe_ent->n_ports = ADMA_PORTS; | |
696 | ||
697 | for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) { | |
698 | adma_ata_setup_port(&probe_ent->port[port_no], | |
699 | ADMA_ATA_REGS((unsigned long)mmio_base, port_no)); | |
700 | } | |
701 | ||
702 | pci_set_master(pdev); | |
703 | ||
704 | /* initialize adapter */ | |
705 | adma_host_init(board_idx, probe_ent); | |
706 | ||
707 | rc = ata_device_add(probe_ent); | |
708 | kfree(probe_ent); | |
709 | if (rc != ADMA_PORTS) | |
710 | goto err_out_iounmap; | |
711 | return 0; | |
712 | ||
713 | err_out_iounmap: | |
714 | pci_iounmap(pdev, mmio_base); | |
715 | err_out_regions: | |
716 | pci_release_regions(pdev); | |
717 | err_out: | |
718 | pci_disable_device(pdev); | |
719 | return rc; | |
720 | } | |
721 | ||
722 | static int __init adma_ata_init(void) | |
723 | { | |
724 | return pci_module_init(&adma_ata_pci_driver); | |
725 | } | |
726 | ||
727 | static void __exit adma_ata_exit(void) | |
728 | { | |
729 | pci_unregister_driver(&adma_ata_pci_driver); | |
730 | } | |
731 | ||
732 | MODULE_AUTHOR("Mark Lord"); | |
733 | MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver"); | |
734 | MODULE_LICENSE("GPL"); | |
735 | MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl); | |
736 | MODULE_VERSION(DRV_VERSION); | |
737 | ||
738 | module_init(adma_ata_init); | |
739 | module_exit(adma_ata_exit); |