// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  sata_sil.c - Silicon Image SATA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2005 Red Hat, Inc.
 *  Copyright 2003 Benjamin Herrenschmidt
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Documentation for SiI 3112:
 *  http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
 *
 *  Other errata and documentation available under NDA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>

#define DRV_NAME	"sata_sil"
#define DRV_VERSION	"2.4"

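/*
 * These parts support Large Block Transfer, so a single DMA chunk may
 * span up to 2 GB and cross 64KB boundaries; hence the relaxed boundary
 * (see the note at sil_sht below).
 */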
#define SIL_DMA_BOUNDARY	0x7fffffffUL

enum {
	SIL_MMIO_BAR		= 5,

	/*
	 * host flags
	 */
	SIL_FLAG_NO_SATA_IRQ	= (1 << 28),
	SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
	SIL_FLAG_MOD15WRITE	= (1 << 30),

	SIL_DFL_PORT_FLAGS	= ATA_FLAG_SATA,

	/*
	 * Controller IDs
	 */
	sil_3112		= 0,
	sil_3112_no_sata_irq	= 1,
	sil_3512		= 2,
	sil_3114		= 3,

	/*
	 * Register offsets
	 */
	SIL_SYSCFG		= 0x48,

	/*
	 * Register bits
	 */
	/* SYSCFG */
	SIL_MASK_IDE0_INT	= (1 << 22),
	SIL_MASK_IDE1_INT	= (1 << 23),
	SIL_MASK_IDE2_INT	= (1 << 24),
	SIL_MASK_IDE3_INT	= (1 << 25),
	SIL_MASK_2PORT		= SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
	SIL_MASK_4PORT		= SIL_MASK_2PORT |
				  SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,

	/* BMDMA/BMDMA2 */
	SIL_INTR_STEERING	= (1 << 1),

	SIL_DMA_ENABLE		= (1 << 0),  /* DMA run switch */
	SIL_DMA_RDWR		= (1 << 3),  /* DMA Rd-Wr */
	SIL_DMA_SATA_IRQ	= (1 << 4),  /* OR of all SATA IRQs */
	SIL_DMA_ACTIVE		= (1 << 16), /* DMA running */
	SIL_DMA_ERROR		= (1 << 17), /* PCI bus error */
	SIL_DMA_COMPLETE	= (1 << 18), /* cmd complete / IRQ pending */
	SIL_DMA_N_SATA_IRQ	= (1 << 6),  /* SATA_IRQ for the next channel */
	SIL_DMA_N_ACTIVE	= (1 << 24), /* ACTIVE for the next channel */
	SIL_DMA_N_ERROR		= (1 << 25), /* ERROR for the next channel */
	SIL_DMA_N_COMPLETE	= (1 << 26), /* COMPLETE for the next channel */

	/* SIEN */
	SIL_SIEN_N		= (1 << 16), /* triggered by SError.N */

	/*
	 * Others
	 */
	SIL_QUIRK_MOD15WRITE	= (1 << 0),
	SIL_QUIRK_UDMA5MAX	= (1 << 1),
};

static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_device *dev);
static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc);
static void sil_bmdma_setup(struct ata_queued_cmd *qc);
static void sil_bmdma_start(struct ata_queued_cmd *qc);
static void sil_bmdma_stop(struct ata_queued_cmd *qc);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);


static const struct pci_device_id sil_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, 0x3112), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x0240), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x3512), sil_3512 },
	{ PCI_VDEVICE(CMD, 0x3114), sil_3114 },
	{ PCI_VDEVICE(ATI, 0x436e), sil_3112 },
	{ PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
	{ PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },

	{ }	/* terminate list */
};

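/*
 * The product strings below must match the IDENTIFY DEVICE model field
 * exactly; sil_dev_config() compares them with strcmp().
 */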
/* TODO: firmware versions should be added - eric */
static const struct sil_drivelist {
	const char *product;
	unsigned int quirk;
} sil_quirks[] = {
	{ "ST320012AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST330013AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST340017AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST360015AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST380023AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST3120023AS",	SIL_QUIRK_MOD15WRITE },
	{ "ST340014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST360014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST380011ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3120022ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3160021ASL",	SIL_QUIRK_MOD15WRITE },
	{ "TOSHIBA MK2561GSYN",	SIL_QUIRK_MOD15WRITE },
	{ "Maxtor 4D060H3",	SIL_QUIRK_UDMA5MAX },
	{ }
};

static struct pci_driver sil_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sil_pci_tbl,
	.probe			= sil_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= sil_pci_device_resume,
#endif
};

static const struct scsi_host_template sil_sht = {
	ATA_BASE_SHT(DRV_NAME),
	/* These controllers support Large Block Transfer, which allows
	 * transfer chunks of up to 2 GB that may cross 64KB boundaries,
	 * so the DMA limits are more relaxed than for standard ATA SFF.
	 */
	.dma_boundary		= SIL_DMA_BOUNDARY,
	.sg_tablesize		= ATA_MAX_PRD
};

static struct ata_port_operations sil_ops = {
	.inherits		= &ata_bmdma32_port_ops,
	.dev_config		= sil_dev_config,
	.set_mode		= sil_set_mode,
	.bmdma_setup		= sil_bmdma_setup,
	.bmdma_start		= sil_bmdma_start,
	.bmdma_stop		= sil_bmdma_stop,
	.qc_prep		= sil_qc_prep,
	.freeze			= sil_freeze,
	.thaw			= sil_thaw,
	.scr_read		= sil_scr_read,
	.scr_write		= sil_scr_write,
};

static const struct ata_port_info sil_port_info[] = {
	/* sil_3112 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3112_no_sata_irq */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
				  SIL_FLAG_NO_SATA_IRQ,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3512 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3114 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
};

/* per-port register offsets */
/* TODO: we can probably calculate rather than use a table */
static const struct {
	unsigned long tf;	/* ATA taskfile register block */
	unsigned long ctl;	/* ATA control/altstatus register block */
	unsigned long bmdma;	/* DMA register block */
	unsigned long bmdma2;	/* DMA register block #2 */
	unsigned long fifo_cfg;	/* FIFO Valid Byte Count and Control */
	unsigned long scr;	/* SATA control register block */
	unsigned long sien;	/* SATA Interrupt Enable register */
	unsigned long xfer_mode;/* data transfer mode register */
	unsigned long sfis_cfg;	/* SATA FIS reception config register */
} sil_port[] = {
	/* port 0 ... */
	/*    tf   ctl   bmdma  bmdma2  fifo   scr    sien   mode   sfis */
	{  0x80, 0x8A,   0x0,  0x10,  0x40, 0x100, 0x148,  0xb4, 0x14c },
	{  0xC0, 0xCA,   0x8,  0x18,  0x44, 0x180, 0x1c8,  0xf4, 0x1cc },
	{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
	{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
	/* ... port 3 */
};
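/*
 * Note that ports 2 and 3 (3114 only) repeat the layout of ports 0 and
 * 1 shifted up by 0x200, which is what the TODO above alludes to.
 */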

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int slow_down;
module_param(slow_down, int, 0444);
MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");


static void sil_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;

	/* clear start/stop bit - can safely always write 0 */
	iowrite8(0, bmdma2);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}

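/*
 * Note that unlike the generic ata_bmdma_setup(), this does not program
 * the DMA direction bit here; sil_bmdma_start() sets direction and start
 * together through the bmdma2 register.
 */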
static void sil_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *bmdma = ap->ioaddr.bmdma_addr;

	/* load PRD table addr. */
	iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}

static void sil_bmdma_start(struct ata_queued_cmd *qc)
{
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
	u8 dmactl = ATA_DMA_START;

	/* set transfer direction, start host DMA transaction
	   Note: For Large Block Transfer to work, the DMA must be started
	   using the bmdma2 register. */
	if (!rw)
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, bmdma2);
}

/* The way God intended PCI IDE scatter/gather lists to look and behave... */
static void sil_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd, *last_prd = NULL;
	unsigned int si;

	prd = &ap->bmdma_prd[0];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		/* Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		u32 addr = (u32) sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		prd->addr = cpu_to_le32(addr);
		prd->flags_len = cpu_to_le32(sg_len);

		last_prd = prd;
		prd++;
	}

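	/* mark the final PRD entry so the controller knows where the
	 * scatter/gather list ends
	 */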
	if (likely(last_prd))
		last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

	sil_fill_sg(qc);

	return AC_ERR_OK;
}

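/*
 * Read back the programmed PCI cache line size; sil_init_controller()
 * derives the FIFO arbitration thresholds from it.
 */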
static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
{
	u8 cache_line = 0;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
	return cache_line;
}

/**
 * sil_set_mode - wrap set_mode functions
 * @link: link to set up
 * @r_failed: returned device when we fail
 *
 * Wrap the libata method for device setup: after the setup completes,
 * we need to inspect the results and do some configuration work.
 */

static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
{
	struct ata_port *ap = link->ap;
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
	struct ata_device *dev;
	u32 tmp, dev_mode[2] = { };
	int rc;

	rc = ata_do_set_mode(link, r_failed);
	if (rc)
		return rc;

	ata_for_each_dev(dev, link, ALL) {
		if (!ata_dev_enabled(dev))
			dev_mode[dev->devno] = 0;	/* PIO0/1/2 */
		else if (dev->flags & ATA_DFLAG_PIO)
			dev_mode[dev->devno] = 1;	/* PIO3/4 */
		else
			dev_mode[dev->devno] = 3;	/* UDMA */
		/* value 2 indicates MDMA */
	}

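	/* Bits 1:0 of the xfer_mode register hold device 0's class and
	 * bits 5:4 device 1's, encoded as above: 0 = PIO0-2, 1 = PIO3/4,
	 * 2 = MWDMA, 3 = UDMA.
	 */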
	tmp = readl(addr);
	tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
	tmp |= dev_mode[0];
	tmp |= (dev_mode[1] << 4);
	writel(tmp, addr);
	readl(addr);	/* flush */
	return 0;
}

static inline void __iomem *sil_scr_addr(struct ata_port *ap,
					 unsigned int sc_reg)
{
	void __iomem *offset = ap->ioaddr.scr_addr;

	switch (sc_reg) {
	case SCR_STATUS:
		return offset + 4;
	case SCR_ERROR:
		return offset + 8;
	case SCR_CONTROL:
		return offset;
	default:
		/* do nothing */
		break;
	}

	return NULL;
}

static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);

	if (mmio) {
		*val = readl(mmio);
		return 0;
	}
	return -EINVAL;
}

static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);

	if (mmio) {
		writel(val, mmio);
		return 0;
	}
	return -EINVAL;
}

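/*
 * Per-port interrupt handler.  @bmdma2 is the port's BMDMA2 status word
 * as sampled by sil_interrupt() below.
 */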
static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
	u8 status;

	if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
		u32 serror = 0xffffffff;

		/* SIEN doesn't mask SATA IRQs on some 3112s.  Those
		 * controllers continue to assert IRQ as long as
		 * SError bits are pending.  Clear SError immediately.
		 */
		sil_scr_read(&ap->link, SCR_ERROR, &serror);
		sil_scr_write(&ap->link, SCR_ERROR, serror);

		/* Spurious interrupts sometimes occur; double-check that
		 * this really is a PHYRDY CHG.
		 */
		if (serror & SERR_PHYRDY_CHG) {
			ap->link.eh_info.serror |= serror;
			goto freeze;
		}

		if (!(bmdma2 & SIL_DMA_COMPLETE))
			return;
	}

	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		/* this sometimes happens, just clear IRQ */
		ap->ops->sff_check_status(ap);
		return;
	}

	/* Check whether we are expecting an interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * in this state when ready to receive a CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag is turned on only for ATAPI devices, so there
		 * is no need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto err_hsm;
		break;
	case HSM_ST_LAST:
		if (ata_is_dma(qc->tf.protocol)) {
			/* clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (bmdma2 & SIL_DMA_ERROR) {
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto err_hsm;
	}

	/* check main status, clearing INTRQ */
	status = ap->ops->sff_check_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto err_hsm;

	/* ack bmdma irq events */
	ata_bmdma_irq_clear(ap);

	/* kick HSM in the ass */
	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);

	return;

err_hsm:
	qc->err_mask |= AC_ERR_HSM;
freeze:
	ata_port_freeze(ap);
}

static irqreturn_t sil_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	int handled = 0;
	int i;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);

		/* turn off SATA_IRQ if not supported */
		if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
			bmdma2 &= ~SIL_DMA_SATA_IRQ;

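		/* An all-ones readback usually means MMIO reads are
		 * failing (e.g. the device dropped off the bus), so
		 * don't interpret it as interrupt status.
		 */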
		if (bmdma2 == 0xffffffff ||
		    !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
			continue;

		sil_host_intr(ap, bmdma2);
		handled = 1;
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void sil_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
	writel(0, mmio_base + sil_port[ap->port_no].sien);

	/* plug IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp |= SIL_MASK_IDE0_INT << ap->port_no;
	writel(tmp, mmio_base + SIL_SYSCFG);
	readl(mmio_base + SIL_SYSCFG);	/* flush */

	/* Ensure DMA_ENABLE is off.
	 *
	 * This is because the controller will not give us access to the
	 * taskfile registers while a DMA is in progress.
	 */
	iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE,
		 ap->ioaddr.bmdma_addr);

	/* According to ata_bmdma_stop, an HDMA transition requires one
	 * PIO cycle.  But we can't read a taskfile register, so read a
	 * bmdma register instead.
	 */
	ioread8(ap->ioaddr.bmdma_addr);
}

static void sil_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* clear IRQ */
	ap->ops->sff_check_status(ap);
	ata_bmdma_irq_clear(ap);

	/* turn on SATA IRQ if supported */
	if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
		writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);

	/* turn on IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
	writel(tmp, mmio_base + SIL_SYSCFG);
}

/**
 * sil_dev_config - Apply device/host-specific errata fixups
 * @dev: Device to be examined
 *
 * After the IDENTIFY [PACKET] DEVICE step is complete, and a
 * device is known to be present, this function is called.
 * We apply two errata fixups which are specific to Silicon Image:
 * a Seagate fixup and a Maxtor fixup.
 *
 * For certain Seagate devices, we must limit the maximum sectors
 * to under 8K.
 *
 * For certain Maxtor devices, we must not program the drive
 * beyond udma5.
 *
 * Both fixups are unfairly pessimistic.  As soon as I get more
 * information on these errata, I will create a more exhaustive
 * list, and apply the fixups to only the specific
 * devices/hosts/firmwares that need it.
 *
 * 20040111 - Seagate drives affected by the Mod15Write bug are quirked.
 * The Maxtor quirk is in sil_quirks, but I'm keeping the original
 * pessimistic fix for the following reasons...
 * - There seems to be less info on it, only one device gleaned off the
 *   Windows driver, maybe only one is affected.  More info would be
 *   greatly appreciated.
 * - But then again UDMA5 is hardly anything to complain about.
 */
static void sil_dev_config(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
	unsigned int n, quirks = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	/* This controller doesn't support trim */
	dev->quirks |= ATA_QUIRK_NOTRIM;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	for (n = 0; sil_quirks[n].product; n++)
		if (!strcmp(sil_quirks[n].product, model_num)) {
			quirks = sil_quirks[n].quirk;
			break;
		}

	/* limit requests to 15 sectors */
	if (slow_down ||
	    ((ap->flags & SIL_FLAG_MOD15WRITE) &&
	     (quirks & SIL_QUIRK_MOD15WRITE))) {
		if (print_info)
			ata_dev_info(dev,
		"applying Seagate errata fix (mod15write workaround)\n");
		dev->max_sectors = 15;
		return;
	}

	/* limit to udma5 */
	if (quirks & SIL_QUIRK_UDMA5MAX) {
		if (print_info)
			ata_dev_info(dev, "applying Maxtor errata fix %s\n",
				     model_num);
		dev->udma_mask &= ATA_UDMA5;
		return;
	}
}

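/*
 * Program controller-wide setup: FIFO PCI bus arbitration, the R_ERR on
 * DMA activate FIS errata workaround, and 4-port interrupt steering.
 * Called at probe time and again from sil_pci_device_resume(),
 * presumably because these settings do not survive a suspend cycle.
 */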
static void sil_init_controller(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	u8 cls;
	u32 tmp;
	int i;

	/* Initialize FIFO PCI bus arbitration */
	cls = sil_get_device_cache_line(pdev);
	if (cls) {
		cls >>= 3;
		cls++;	/* cls = (line_size/8)+1 */
		for (i = 0; i < host->n_ports; i++)
			writew(cls << 8 | cls,
			       mmio_base + sil_port[i].fifo_cfg);
	} else
		dev_warn(&pdev->dev,
			 "cache line size not set.  Driver may not function\n");

	/* Apply R_ERR on DMA activate FIS errata workaround */
	if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
		int cnt;

		for (i = 0, cnt = 0; i < host->n_ports; i++) {
			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
			if ((tmp & 0x3) != 0x01)
				continue;
			if (!cnt)
				dev_info(&pdev->dev,
					 "Applying R_ERR on DMA activate FIS errata fix\n");
			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
			cnt++;
		}
	}

	if (host->n_ports == 4) {
		/* flip the magic "make 4 ports work" bit */
		tmp = readl(mmio_base + sil_port[2].bmdma);
		if ((tmp & SIL_INTR_STEERING) == 0)
			writel(tmp | SIL_INTR_STEERING,
			       mmio_base + sil_port[2].bmdma);
	}
}

static bool sil_broken_system_poweroff(struct pci_dev *pdev)
{
	static const struct dmi_system_id broken_systems[] = {
		{
			.ident = "HP Compaq nx6325",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
			},
			/* PCI slot number of the controller */
			.driver_data = (void *)0x12UL,
		},

		{ }	/* terminate list */
	};
	const struct dmi_system_id *dmi = dmi_first_match(broken_systems);

	if (dmi) {
		unsigned long slot = (unsigned long)dmi->driver_data;

		/* apply the quirk only to on-board controllers */
		return slot == PCI_SLOT(pdev->devfn);
	}

	return false;
}

static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int board_id = ent->driver_data;
	struct ata_port_info pi = sil_port_info[board_id];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int n_ports, rc;
	unsigned int i;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host */
	n_ports = 2;
	if (board_id == sil_3114)
		n_ports = 4;

	if (sil_broken_system_poweroff(pdev)) {
		pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN |
					ATA_FLAG_NO_HIBERNATE_SPINDOWN;
		dev_info(&pdev->dev, "quirky BIOS, skipping spindown on poweroff and hibernation\n");
	}

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	mmio_base = host->iomap[SIL_MMIO_BAR];

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
		ioaddr->altstatus_addr =
		ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
		ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
		ioaddr->scr_addr = mmio_base + sil_port[i].scr;
		ata_sff_std_ports(ioaddr);

		ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
	}

	/* initialize and activate */
	sil_init_controller(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
				 &sil_sht);
}

#ifdef CONFIG_PM_SLEEP
static int sil_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	sil_init_controller(host);
	ata_host_resume(host);

	return 0;
}
#endif

module_pci_driver(sil_pci_driver);