2 * sata_sil.c - Silicon Image SATA
4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2005 Red Hat, Inc.
9 * Copyright 2003 Benjamin Herrenschmidt
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Documentation for SiI 3112:
31 * http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
33 * Other errata and documentation available under NDA.
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/pci.h>
40 #include <linux/blkdev.h>
41 #include <linux/delay.h>
42 #include <linux/interrupt.h>
43 #include <linux/device.h>
44 #include <scsi/scsi_host.h>
45 #include <linux/libata.h>
46 #include <linux/dmi.h>
/* Driver identity reported via libata and MODULE_VERSION. */
48 #define DRV_NAME "sata_sil"
49 #define DRV_VERSION "2.4"
/* Relaxed DMA segment boundary (2 GB - 1); see the note on sil_sht:
 * these controllers support Large Block Transfer, so segments need not
 * honor the standard SFF 64KB boundary. */
51 #define SIL_DMA_BOUNDARY 0x7fffffffUL
/* Per-board capability flags, carried in ata_port_info.flags
 * (they occupy the high, driver-private bits of the port-flag word). */
59 SIL_FLAG_NO_SATA_IRQ = (1 << 28),
60 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
61 SIL_FLAG_MOD15WRITE = (1 << 30),
63 SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA,
/* Board IDs, used as indices into sil_port_info[]. */
69 sil_3112_no_sata_irq = 1,
/* Host SYSCFG register: per-channel IDE interrupt mask bits. */
82 SIL_MASK_IDE0_INT = (1 << 22),
83 SIL_MASK_IDE1_INT = (1 << 23),
84 SIL_MASK_IDE2_INT = (1 << 24),
85 SIL_MASK_IDE3_INT = (1 << 25),
86 SIL_MASK_2PORT = SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
87 SIL_MASK_4PORT = SIL_MASK_2PORT |
88 SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,
/* BMDMA register bit enabling interrupt steering on 4-port parts. */
91 SIL_INTR_STEERING = (1 << 1),
/* bmdma2 register bits: low bits control DMA, bits 16+ report status
 * for this channel, and the _N_ variants mirror the sibling channel. */
93 SIL_DMA_ENABLE = (1 << 0), /* DMA run switch */
94 SIL_DMA_RDWR = (1 << 3), /* DMA Rd-Wr */
95 SIL_DMA_SATA_IRQ = (1 << 4), /* OR of all SATA IRQs */
96 SIL_DMA_ACTIVE = (1 << 16), /* DMA running */
97 SIL_DMA_ERROR = (1 << 17), /* PCI bus error */
98 SIL_DMA_COMPLETE = (1 << 18), /* cmd complete / IRQ pending */
99 SIL_DMA_N_SATA_IRQ = (1 << 6), /* SATA_IRQ for the next channel */
100 SIL_DMA_N_ACTIVE = (1 << 24), /* ACTIVE for the next channel */
101 SIL_DMA_N_ERROR = (1 << 25), /* ERROR for the next channel */
102 SIL_DMA_N_COMPLETE = (1 << 26), /* COMPLETE for the next channel */
/* SATA Interrupt Enable register bit. */
105 SIL_SIEN_N = (1 << 16), /* triggered by SError.N */
/* Drive-quirk flags matched against sil_blacklist[] in sil_dev_config(). */
110 SIL_QUIRK_MOD15WRITE = (1 << 0),
111 SIL_QUIRK_UDMA5MAX = (1 << 1),
/* Forward declarations for the driver's libata/PCI callbacks below. */
114 static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
116 static int sil_pci_device_resume(struct pci_dev *pdev);
118 static void sil_dev_config(struct ata_device *dev);
119 static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
120 static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
121 static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
122 static void sil_qc_prep(struct ata_queued_cmd *qc);
123 static void sil_bmdma_setup(struct ata_queued_cmd *qc);
124 static void sil_bmdma_start(struct ata_queued_cmd *qc);
125 static void sil_bmdma_stop(struct ata_queued_cmd *qc);
126 static void sil_freeze(struct ata_port *ap);
127 static void sil_thaw(struct ata_port *ap);
/* PCI IDs we claim; .driver_data is a board ID indexing sil_port_info[].
 * The ATI entries are SiI cores integrated into ATI chipsets. */
130 static const struct pci_device_id sil_pci_tbl[] = {
131 { PCI_VDEVICE(CMD, 0x3112), sil_3112 },
132 { PCI_VDEVICE(CMD, 0x0240), sil_3112 },
133 { PCI_VDEVICE(CMD, 0x3512), sil_3512 },
134 { PCI_VDEVICE(CMD, 0x3114), sil_3114 },
135 { PCI_VDEVICE(ATI, 0x436e), sil_3112 },
136 { PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
137 { PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },
139 { } /* terminate list */
143 /* TODO firmware versions should be added - eric */
/* Drives with known Silicon Image interaction errata, matched by exact
 * IDENTIFY model string in sil_dev_config().  Mostly Seagates needing
 * the mod15write workaround, plus one Maxtor limited to UDMA5. */
144 static const struct sil_drivelist {
147 } sil_blacklist [] = {
148 { "ST320012AS", SIL_QUIRK_MOD15WRITE },
149 { "ST330013AS", SIL_QUIRK_MOD15WRITE },
150 { "ST340017AS", SIL_QUIRK_MOD15WRITE },
151 { "ST360015AS", SIL_QUIRK_MOD15WRITE },
152 { "ST380023AS", SIL_QUIRK_MOD15WRITE },
153 { "ST3120023AS", SIL_QUIRK_MOD15WRITE },
154 { "ST340014ASL", SIL_QUIRK_MOD15WRITE },
155 { "ST360014ASL", SIL_QUIRK_MOD15WRITE },
156 { "ST380011ASL", SIL_QUIRK_MOD15WRITE },
157 { "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
158 { "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
159 { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
/* PCI driver glue.  Suspend uses the generic libata helper; resume is
 * wrapped (sil_pci_device_resume) so controller state can be re-initialized. */
163 static struct pci_driver sil_pci_driver = {
165 .id_table = sil_pci_tbl,
166 .probe = sil_init_one,
167 .remove = ata_pci_remove_one,
169 .suspend = ata_pci_device_suspend,
170 .resume = sil_pci_device_resume,
/* SCSI host template: stock libata defaults except for the DMA limits. */
174 static struct scsi_host_template sil_sht = {
175 ATA_BASE_SHT(DRV_NAME),
176 /* These controllers support Large Block Transfer which allows
177 transfer chunks up to 2GB and which cross 64KB boundaries,
178 therefore the DMA limits are more relaxed than standard ATA SFF. */
179 .dma_boundary = SIL_DMA_BOUNDARY,
180 .sg_tablesize = ATA_MAX_PRD
/* Port operations: inherit 32-bit PIO BMDMA defaults, then override the
 * hooks that must use this chip's MMIO bmdma2 register and errata paths. */
183 static struct ata_port_operations sil_ops = {
184 .inherits = &ata_bmdma32_port_ops,
185 .dev_config = sil_dev_config,
186 .set_mode = sil_set_mode,
187 .bmdma_setup = sil_bmdma_setup,
188 .bmdma_start = sil_bmdma_start,
189 .bmdma_stop = sil_bmdma_stop,
190 .qc_prep = sil_qc_prep,
191 .freeze = sil_freeze,
193 .scr_read = sil_scr_read,
194 .scr_write = sil_scr_write,
/* Per-board configuration, indexed by the board IDs in sil_pci_tbl.
 * All boards share PIO4/MWDMA2/UDMA5 and sil_ops; they differ only in
 * which errata flags apply. */
197 static const struct ata_port_info sil_port_info[] = {
/* sil_3112 */
200 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
201 .pio_mask = ATA_PIO4,
202 .mwdma_mask = ATA_MWDMA2,
203 .udma_mask = ATA_UDMA5,
204 .port_ops = &sil_ops,
206 /* sil_3112_no_sata_irq */
208 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
209 SIL_FLAG_NO_SATA_IRQ,
210 .pio_mask = ATA_PIO4,
211 .mwdma_mask = ATA_MWDMA2,
212 .udma_mask = ATA_UDMA5,
213 .port_ops = &sil_ops,
/* sil_3512 */
217 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
218 .pio_mask = ATA_PIO4,
219 .mwdma_mask = ATA_MWDMA2,
220 .udma_mask = ATA_UDMA5,
221 .port_ops = &sil_ops,
/* sil_3114 */
225 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
226 .pio_mask = ATA_PIO4,
227 .mwdma_mask = ATA_MWDMA2,
228 .udma_mask = ATA_UDMA5,
229 .port_ops = &sil_ops,
233 /* per-port register offsets */
234 /* TODO: we can probably calculate rather than use a table */
/* MMIO offsets for each of up to four ports.  Indexed by ap->port_no
 * throughout the driver.  Ports 0/1 live in the low register window,
 * ports 2/3 (3114 only) in a second window at +0x200. */
235 static const struct {
236 unsigned long tf; /* ATA taskfile register block */
237 unsigned long ctl; /* ATA control/altstatus register block */
238 unsigned long bmdma; /* DMA register block */
239 unsigned long bmdma2; /* DMA register block #2 */
240 unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
241 unsigned long scr; /* SATA control register block */
242 unsigned long sien; /* SATA Interrupt Enable register */
243 unsigned long xfer_mode;/* data transfer mode register */
244 unsigned long sfis_cfg; /* SATA FIS reception config register */
247 /* tf ctl bmdma bmdma2 fifo scr sien mode sfis */
248 { 0x80, 0x8A, 0x0, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c },
249 { 0xC0, 0xCA, 0x8, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
250 { 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
251 { 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
/* Module metadata and the "slow_down" escape-hatch parameter, which
 * forces the 15-sector limit on every device (see sil_dev_config). */
255 MODULE_AUTHOR("Jeff Garzik");
256 MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
257 MODULE_LICENSE("GPL");
258 MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
259 MODULE_VERSION(DRV_VERSION);
261 static int slow_down;
262 module_param(slow_down, int, 0444);
263 MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
/* Stop BMDMA for @qc's port.  Uses the per-port bmdma2 register in the
 * MMIO BAR (not ap->ioaddr.bmdma_addr), matching sil_bmdma_start(). */
266 static void sil_bmdma_stop(struct ata_queued_cmd *qc)
268 struct ata_port *ap = qc->ap;
269 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
270 void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
272 /* clear start/stop bit - can safely always write 0 */
275 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
276 ata_sff_dma_pause(ap);
/* Program the PRD table address and issue the taskfile command.
 * Unlike the generic SFF version, this does NOT write the DMA direction
 * bit here - direction is set in sil_bmdma_start() via bmdma2. */
279 static void sil_bmdma_setup(struct ata_queued_cmd *qc)
281 struct ata_port *ap = qc->ap;
282 void __iomem *bmdma = ap->ioaddr.bmdma_addr;
284 /* load PRD table addr. */
285 iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS);
287 /* issue r/w command */
288 ap->ops->sff_exec_command(ap, &qc->tf);
/* Kick off the host DMA engine for @qc, setting the transfer direction
 * from the taskfile WRITE flag. */
291 static void sil_bmdma_start(struct ata_queued_cmd *qc)
293 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
294 struct ata_port *ap = qc->ap;
295 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
296 void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
297 u8 dmactl = ATA_DMA_START;
299 /* set transfer direction, start host DMA transaction
300 Note: For Large Block Transfer to work, the DMA must be started
301 using the bmdma2 register. */
/* NOTE(review): the conditional guarding ATA_DMA_WR on !rw is elided
 * here - presumably `if (!rw)` as in the generic BMDMA start; confirm. */
303 dmactl |= ATA_DMA_WR;
304 iowrite8(dmactl, bmdma2);
307 /* The way God intended PCI IDE scatter/gather lists to look and behave... */
/* Translate @qc's scatterlist into the port's BMDMA PRD table.
 * The last PRD written gets ATA_PRD_EOT to terminate the table. */
308 static void sil_fill_sg(struct ata_queued_cmd *qc)
310 struct scatterlist *sg;
311 struct ata_port *ap = qc->ap;
312 struct ata_bmdma_prd *prd, *last_prd = NULL;
315 prd = &ap->bmdma_prd[0];
316 for_each_sg(qc->sg, sg, qc->n_elem, si) {
317 /* Note h/w doesn't support 64-bit, so we unconditionally
318 * truncate dma_addr_t to u32.
320 u32 addr = (u32) sg_dma_address(sg);
321 u32 sg_len = sg_dma_len(sg);
323 prd->addr = cpu_to_le32(addr);
324 prd->flags_len = cpu_to_le32(sg_len);
325 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len);
/* last_prd tracks the final entry so EOT can be set after the loop */
331 if (likely(last_prd))
332 last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
/* qc_prep hook: build the PRD table, but only for DMA-mapped commands. */
335 static void sil_qc_prep(struct ata_queued_cmd *qc)
337 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
/* Read the PCI cache line size config byte; used by sil_init_controller()
 * to program FIFO bus-arbitration. */
343 static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
346 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
351 * sil_set_mode - wrap set_mode functions
352 * @link: link to set up
353 * @r_failed: returned device when we fail
355 * Wrap the libata method for device setup as after the setup we need
356 * to inspect the results and do some configuration work
359 static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
361 struct ata_port *ap = link->ap;
362 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
363 void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
364 struct ata_device *dev;
365 u32 tmp, dev_mode[2] = { };
/* First let libata negotiate timings normally ... */
368 rc = ata_do_set_mode(link, r_failed);
/* ... then encode each device's resulting class into the chip's
 * 2-bit per-device transfer-mode field: 0=PIO0-2, 1=PIO3/4, 3=UDMA. */
372 ata_for_each_dev(dev, link, ALL) {
373 if (!ata_dev_enabled(dev))
374 dev_mode[dev->devno] = 0; /* PIO0/1/2 */
375 else if (dev->flags & ATA_DFLAG_PIO)
376 dev_mode[dev->devno] = 1; /* PIO3/4 */
378 dev_mode[dev->devno] = 3; /* UDMA */
379 /* value 2 indicates MDMA */
/* device 0 occupies bits 1:0, device 1 bits 5:4 of the mode register */
383 tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
385 tmp |= (dev_mode[1] << 4);
387 readl(addr); /* flush */
/* Map an SCR register index (SCR_STATUS etc.) to its MMIO address within
 * the port's SATA control block; body mostly elided in this snapshot. */
391 static inline void __iomem *sil_scr_addr(struct ata_port *ap,
394 void __iomem *offset = ap->ioaddr.scr_addr;
/* scr_read hook: read a SATA control register via sil_scr_addr(). */
411 static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
413 void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
/* scr_write hook: write a SATA control register via sil_scr_addr(). */
422 static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
424 void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
/* Per-port interrupt body, called from sil_interrupt() with the port's
 * bmdma2 status already read.  Handles SATA (hotplug/PHY) events first,
 * then drives the SFF host state machine for the active command. */
433 static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
435 struct ata_eh_info *ehi = &ap->link.eh_info;
436 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
439 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
440 u32 serror = 0xffffffff;
442 /* SIEN doesn't mask SATA IRQs on some 3112s. Those
443 * controllers continue to assert IRQ as long as
444 * SError bits are pending. Clear SError immediately.
446 sil_scr_read(&ap->link, SCR_ERROR, &serror);
447 sil_scr_write(&ap->link, SCR_ERROR, serror);
449 /* Sometimes spurious interrupts occur, double check
452 if (serror & SERR_PHYRDY_CHG) {
453 ap->link.eh_info.serror |= serror;
/* Only COMPLETE means a command interrupt for us; anything else here
 * was SATA-event-only and we are done with this port. */
457 if (!(bmdma2 & SIL_DMA_COMPLETE))
461 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
462 /* this sometimes happens, just clear IRQ */
463 ap->ops->sff_check_status(ap);
467 /* Check whether we are expecting interrupt in this state */
468 switch (ap->hsm_task_state) {
470 /* Some pre-ATAPI-4 devices assert INTRQ
471 * at this state when ready to receive CDB.
474 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
475 * The flag was turned on only for atapi devices. No
476 * need to check ata_is_atapi(qc->tf.protocol) again.
478 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
482 if (ata_is_dma(qc->tf.protocol)) {
483 /* clear DMA-Start bit */
484 ap->ops->bmdma_stop(qc);
486 if (bmdma2 & SIL_DMA_ERROR) {
487 qc->err_mask |= AC_ERR_HOST_BUS;
488 ap->hsm_task_state = HSM_ST_ERR;
498 /* check main status, clearing INTRQ */
499 status = ap->ops->sff_check_status(ap);
500 if (unlikely(status & ATA_BUSY))
503 /* ack bmdma irq events */
504 ata_bmdma_irq_clear(ap);
506 /* kick HSM in the ass */
507 ata_sff_hsm_move(ap, qc, status, 0);
509 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
510 ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);
/* unexpected-interrupt path: flag an HSM error on the command */
515 qc->err_mask |= AC_ERR_HSM;
/* Shared IRQ handler: poll each port's bmdma2 status under the host lock
 * and dispatch to sil_host_intr() for ports with pending events. */
520 static irqreturn_t sil_interrupt(int irq, void *dev_instance)
522 struct ata_host *host = dev_instance;
523 void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
527 spin_lock(&host->lock);
529 for (i = 0; i < host->n_ports; i++) {
530 struct ata_port *ap = host->ports[i];
531 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
533 /* turn off SATA_IRQ if not supported */
534 if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
535 bmdma2 &= ~SIL_DMA_SATA_IRQ;
/* 0xffffffff means the device fell off the bus (e.g. hot-unplug) */
537 if (bmdma2 == 0xffffffff ||
538 !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
541 sil_host_intr(ap, bmdma2);
545 spin_unlock(&host->lock);
547 return IRQ_RETVAL(handled);
/* EH freeze hook: mask all interrupt sources for the port and make sure
 * the DMA engine is stopped so taskfile registers stay accessible. */
550 static void sil_freeze(struct ata_port *ap)
552 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
555 /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
556 writel(0, mmio_base + sil_port[ap->port_no].sien);
/* set the per-port IDE interrupt mask bit in SYSCFG */
559 tmp = readl(mmio_base + SIL_SYSCFG);
560 tmp |= SIL_MASK_IDE0_INT << ap->port_no;
561 writel(tmp, mmio_base + SIL_SYSCFG);
562 readl(mmio_base + SIL_SYSCFG); /* flush */
564 /* Ensure DMA_ENABLE is off.
566 * This is because the controller will not give us access to the
567 * taskfile registers while a DMA is in progress
569 iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE,
570 ap->ioaddr.bmdma_addr);
572 /* According to ata_bmdma_stop, an HDMA transition requires
573 * one PIO cycle. But we can't read a taskfile register.
575 ioread8(ap->ioaddr.bmdma_addr);
/* EH thaw hook: clear stale interrupt state, then re-enable SATA and
 * IDE interrupts for the port (undoing sil_freeze). */
578 static void sil_thaw(struct ata_port *ap)
580 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
/* clear any pending device interrupt and BMDMA IRQ state */
584 ap->ops->sff_check_status(ap);
585 ata_bmdma_irq_clear(ap);
587 /* turn on SATA IRQ if supported */
588 if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
589 writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
/* clear the per-port IDE interrupt mask bit in SYSCFG */
592 tmp = readl(mmio_base + SIL_SYSCFG);
593 tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
594 writel(tmp, mmio_base + SIL_SYSCFG);
598 * sil_dev_config - Apply device/host-specific errata fixups
599 * @dev: Device to be examined
601 * After the IDENTIFY [PACKET] DEVICE step is complete, and a
602 * device is known to be present, this function is called.
603 * We apply two errata fixups which are specific to Silicon Image,
604 * a Seagate and a Maxtor fixup.
606 * For certain Seagate devices, we must limit the maximum sectors
609 * For certain Maxtor devices, we must not program the drive
612 * Both fixups are unfairly pessimistic. As soon as I get more
613 * information on these errata, I will create a more exhaustive
614 * list, and apply the fixups to only the specific
615 * devices/hosts/firmwares that need it.
617 * 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
618 * The Maxtor quirk is in the blacklist, but I'm keeping the original
619 * pessimistic fix for the following reasons...
620 * - There seems to be less info on it, only one device gleaned off the
621 * Windows driver, maybe only one is affected. More info would be greatly
623 * - But then again UDMA5 is hardly anything to complain about
625 static void sil_dev_config(struct ata_device *dev)
627 struct ata_port *ap = dev->link->ap;
628 int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
629 unsigned int n, quirks = 0;
630 unsigned char model_num[ATA_ID_PROD_LEN + 1];
/* look up the IDENTIFY model string in the quirk blacklist */
632 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
634 for (n = 0; sil_blacklist[n].product; n++)
635 if (!strcmp(sil_blacklist[n].product, model_num)) {
636 quirks = sil_blacklist[n].quirk;
640 /* limit requests to 15 sectors (mod15write workaround, or forced
 * globally via the slow_down module parameter) */
642 ((ap->flags & SIL_FLAG_MOD15WRITE) &&
643 (quirks & SIL_QUIRK_MOD15WRITE))) {
646 "applying Seagate errata fix (mod15write workaround)\n");
647 dev->max_sectors = 15;
/* limit Maxtor 4D060H3 to UDMA5 */
652 if (quirks & SIL_QUIRK_UDMA5MAX) {
654 ata_dev_info(dev, "applying Maxtor errata fix %s\n",
656 dev->udma_mask &= ATA_UDMA5;
/* One-time controller setup, shared by probe and resume: program FIFO
 * arbitration from the PCI cache line size, apply the R_ERR-on-DMA-activate
 * FIS errata fix where flagged, and enable IRQ steering on 4-port chips. */
661 static void sil_init_controller(struct ata_host *host)
663 struct pci_dev *pdev = to_pci_dev(host->dev);
664 void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
669 /* Initialize FIFO PCI bus arbitration */
670 cls = sil_get_device_cache_line(pdev);
673 cls++; /* cls = (line_size/8)+1 */
674 for (i = 0; i < host->n_ports; i++)
675 writew(cls << 8 | cls,
676 mmio_base + sil_port[i].fifo_cfg);
/* warning path when the BIOS left cache line size unprogrammed */
679 "cache line size not set. Driver may not function\n");
681 /* Apply R_ERR on DMA activate FIS errata workaround */
682 if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
685 for (i = 0, cnt = 0; i < host->n_ports; i++) {
686 tmp = readl(mmio_base + sil_port[i].sfis_cfg);
687 if ((tmp & 0x3) != 0x01)
691 "Applying R_ERR on DMA activate FIS errata fix\n");
692 writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
697 if (host->n_ports == 4) {
698 /* flip the magic "make 4 ports work" bit */
699 tmp = readl(mmio_base + sil_port[2].bmdma);
700 if ((tmp & SIL_INTR_STEERING) == 0)
701 writel(tmp | SIL_INTR_STEERING,
702 mmio_base + sil_port[2].bmdma);
/* DMI quirk check: returns true on systems (HP Compaq nx6325) whose BIOS
 * mishandles spindown of the on-board controller at poweroff/hibernate. */
706 static bool sil_broken_system_poweroff(struct pci_dev *pdev)
708 static const struct dmi_system_id broken_systems[] = {
710 .ident = "HP Compaq nx6325",
712 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
713 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
715 /* PCI slot number of the controller */
716 .driver_data = (void *)0x12UL,
719 { } /* terminate list */
721 const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
724 unsigned long slot = (unsigned long)dmi->driver_data;
725 /* apply the quirk only to on-board controllers */
726 return slot == PCI_SLOT(pdev->devfn);
/* PCI probe: allocate the ata_host from the board's port_info, map BAR
 * resources, set the 32-bit DMA masks, wire up per-port MMIO addresses,
 * run controller init and activate the host with our IRQ handler. */
732 static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
734 int board_id = ent->driver_data;
735 struct ata_port_info pi = sil_port_info[board_id];
736 const struct ata_port_info *ppi[] = { &pi, NULL };
737 struct ata_host *host;
738 void __iomem *mmio_base;
742 ata_print_version_once(&pdev->dev, DRV_VERSION);
/* 3114 is the only 4-port part; others get the default port count */
746 if (board_id == sil_3114)
749 if (sil_broken_system_poweroff(pdev)) {
750 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN |
751 ATA_FLAG_NO_HIBERNATE_SPINDOWN;
752 dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
753 "on poweroff and hibernation\n");
756 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
760 /* acquire resources and fill host */
761 rc = pcim_enable_device(pdev);
765 rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
767 pcim_pin_device(pdev);
770 host->iomap = pcim_iomap_table(pdev);
/* hardware is 32-bit only (see sil_fill_sg address truncation) */
772 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
775 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
779 mmio_base = host->iomap[SIL_MMIO_BAR];
781 for (i = 0; i < host->n_ports; i++) {
782 struct ata_port *ap = host->ports[i];
783 struct ata_ioports *ioaddr = &ap->ioaddr;
785 ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
786 ioaddr->altstatus_addr =
787 ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
788 ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
789 ioaddr->scr_addr = mmio_base + sil_port[i].scr;
790 ata_sff_std_ports(ioaddr);
792 ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
793 ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
796 /* initialize and activate */
797 sil_init_controller(host);
799 pci_set_master(pdev);
800 return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
/* PM resume: generic PCI/libata resume, then re-run controller init
 * (register state is lost across suspend) before resuming the host. */
805 static int sil_pci_device_resume(struct pci_dev *pdev)
807 struct ata_host *host = pci_get_drvdata(pdev);
810 rc = ata_pci_device_do_resume(pdev);
814 sil_init_controller(host);
815 ata_host_resume(host);
/* standard module init/exit boilerplate for a PCI driver */
821 module_pci_driver(sil_pci_driver);