2 * sata_nv.c - NVIDIA nForce SATA
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
32 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
/* Driver identity, and the DMA segment boundary used in ADMA mode
   (4 GB - 1, i.e. segments may not cross a 32-bit boundary). */
51 #define DRV_NAME "sata_nv"
52 #define DRV_VERSION "3.3"
54 #define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
/* Hardware constants: SCR offsets, CK804 interrupt status/enable registers
   and bits, PCI config register 0x50 bits, and the ADMA register map,
   CPB/APRD flag bits and ADMA status bits.
   NOTE(review): the surrounding enum { ... } delimiters and several entries
   appear to have been lost in extraction — verify against the full file. */
63 NV_PORT0_SCR_REG_OFFSET = 0x00,
64 NV_PORT1_SCR_REG_OFFSET = 0x40,
66 /* INT_STATUS/ENABLE */
69 NV_INT_STATUS_CK804 = 0x440,
70 NV_INT_ENABLE_CK804 = 0x441,
72 /* INT_STATUS/ENABLE bits */
76 NV_INT_REMOVED = 0x08,
78 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
81 NV_INT_MASK = NV_INT_DEV |
82 NV_INT_ADDED | NV_INT_REMOVED,
86 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
88 // For PCI config register 20
89 NV_MCP_SATA_CFG_20 = 0x50,
90 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
91 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
92 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
93 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
94 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
96 NV_ADMA_MAX_CPBS = 32,
99 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
101 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
102 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
104 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
106 /* BAR5 offset to ADMA general registers */
108 NV_ADMA_GEN_CTL = 0x00,
109 NV_ADMA_NOTIFIER_CLEAR = 0x30,
111 /* BAR5 offset to ADMA ports */
112 NV_ADMA_PORT = 0x480,
114 /* size of ADMA port register space */
115 NV_ADMA_PORT_SIZE = 0x100,
117 /* ADMA port registers */
119 NV_ADMA_CPB_COUNT = 0x42,
120 NV_ADMA_NEXT_CPB_IDX = 0x43,
122 NV_ADMA_CPB_BASE_LOW = 0x48,
123 NV_ADMA_CPB_BASE_HIGH = 0x4C,
124 NV_ADMA_APPEND = 0x50,
125 NV_ADMA_NOTIFIER = 0x68,
126 NV_ADMA_NOTIFIER_ERROR = 0x6C,
128 /* NV_ADMA_CTL register bits */
129 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
130 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
131 NV_ADMA_CTL_GO = (1 << 7),
132 NV_ADMA_CTL_AIEN = (1 << 8),
133 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
134 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
136 /* CPB response flag bits */
137 NV_CPB_RESP_DONE = (1 << 0),
138 NV_CPB_RESP_ATA_ERR = (1 << 3),
139 NV_CPB_RESP_CMD_ERR = (1 << 4),
140 NV_CPB_RESP_CPB_ERR = (1 << 7),
142 /* CPB control flag bits */
143 NV_CPB_CTL_CPB_VALID = (1 << 0),
144 NV_CPB_CTL_QUEUE = (1 << 1),
145 NV_CPB_CTL_APRD_VALID = (1 << 2),
146 NV_CPB_CTL_IEN = (1 << 3),
147 NV_CPB_CTL_FPDMA = (1 << 4),
/* APRD (scatter/gather entry) flag bits */
150 NV_APRD_WRITE = (1 << 1),
151 NV_APRD_END = (1 << 2),
152 NV_APRD_CONT = (1 << 3),
154 /* NV_ADMA_STAT flags */
155 NV_ADMA_STAT_TIMEOUT = (1 << 0),
156 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
157 NV_ADMA_STAT_HOTPLUG = (1 << 2),
158 NV_ADMA_STAT_CPBERR = (1 << 4),
159 NV_ADMA_STAT_SERROR = (1 << 5),
160 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
161 NV_ADMA_STAT_IDLE = (1 << 8),
162 NV_ADMA_STAT_LEGACY = (1 << 9),
163 NV_ADMA_STAT_STOPPED = (1 << 10),
164 NV_ADMA_STAT_DONE = (1 << 12),
165 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
166 NV_ADMA_STAT_TIMEOUT,
/* per-port private flags (kept in nv_adma_port_priv.flags) */
169 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
170 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
174 /* ADMA Physical Region Descriptor - one SG segment */
/* Taskfile-register encoding bits for the 16-bit CPB taskfile words
   (see nv_adma_tf_to_cpb): high byte selects the register, plus the
   control bits below. */
183 enum nv_adma_regbits {
184 CMDEND = (1 << 15), /* end of command list */
185 WNB = (1 << 14), /* wait-not-BSY */
186 IGN = (1 << 13), /* ignore this entry */
187 CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
188 DA2 = (1 << (2 + 8)),
189 DA1 = (1 << (1 + 8)),
190 DA0 = (1 << (0 + 8)),
193 /* ADMA Command Parameter Block
194 The first 5 SG segments are stored inside the Command Parameter Block itself.
195 If there are more than 5 segments the remainder are stored in a separate
196 memory area indicated by next_aprd. */
/* NOTE(review): the struct nv_adma_cpb opening line appears to have been
   lost in extraction; field offsets are given in the trailing comments. */
198 u8 resp_flags; /* 0 */
199 u8 reserved1; /* 1 */
200 u8 ctl_flags; /* 2 */
201 /* len is length of taskfile in 64 bit words */
204 u8 next_cpb_idx; /* 5 */
205 __le16 reserved2; /* 6-7 */
206 __le16 tf[12]; /* 8-31 */
207 struct nv_adma_prd aprd[5]; /* 32-111 */
208 __le64 next_aprd; /* 112-119 */
209 __le64 reserved3; /* 120-127 */
/* Per-port ADMA state: CPB array, external APRD table, and cached iomem
   pointers into BAR5 (control block, general block, notifier-clear reg). */
213 struct nv_adma_port_priv {
214 struct nv_adma_cpb *cpb;
216 struct nv_adma_prd *aprd;
218 void __iomem * ctl_block;
219 void __iomem * gen_block;
220 void __iomem * notifier_clear_block;
/* Per-host private data (fields elided in this view). */
225 struct nv_host_priv {
/* Test the per-port interrupt bit in the ADMA general control register:
   port 0 uses bit 19, port 1 bit 31 (19 + 12*port). */
229 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
/* Forward declarations for the probe/remove/PM entry points, the three
   legacy interrupt handler flavors, SCR access, freeze/thaw/EH hooks,
   and the ADMA-specific port operations defined later in this file. */
231 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
232 static void nv_remove_one (struct pci_dev *pdev);
233 static int nv_pci_device_resume(struct pci_dev *pdev);
234 static void nv_ck804_host_stop(struct ata_host *host);
235 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
236 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
237 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
238 static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
239 static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
241 static void nv_nf2_freeze(struct ata_port *ap);
242 static void nv_nf2_thaw(struct ata_port *ap);
243 static void nv_ck804_freeze(struct ata_port *ap);
244 static void nv_ck804_thaw(struct ata_port *ap);
245 static void nv_error_handler(struct ata_port *ap);
246 static int nv_adma_slave_config(struct scsi_device *sdev);
247 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
248 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
249 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
250 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
251 static void nv_adma_irq_clear(struct ata_port *ap);
252 static int nv_adma_port_start(struct ata_port *ap);
253 static void nv_adma_port_stop(struct ata_port *ap);
254 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
255 static int nv_adma_port_resume(struct ata_port *ap);
256 static void nv_adma_error_handler(struct ata_port *ap);
257 static void nv_adma_host_stop(struct ata_host *host);
258 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
/* board IDs: GENERIC, NFORCE2, NFORCE3 (== NFORCE2), CK804 */
264 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
264 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
/* PCI ID table: known nForce SATA devices mapped to their board type,
   plus catch-all IDE/RAID class matches for any NVIDIA device (GENERIC). */
269 static const struct pci_device_id nv_pci_tbl[] = {
270 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
271 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
272 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
273 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
274 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
275 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
276 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
277 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
278 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
279 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
280 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
281 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
282 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
283 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
284 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
285 PCI_ANY_ID, PCI_ANY_ID,
286 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
287 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
288 PCI_ANY_ID, PCI_ANY_ID,
289 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
291 { } /* terminate list */
/* PCI driver glue: probe/remove plus suspend/resume power management. */
294 static struct pci_driver nv_pci_driver = {
296 .id_table = nv_pci_tbl,
297 .probe = nv_init_one,
298 .suspend = ata_pci_device_suspend,
299 .resume = nv_pci_device_resume,
300 .remove = nv_remove_one,
/* SCSI host template for the legacy (non-ADMA) BMDMA path: stock libata
   callbacks, LIBATA_MAX_PRD s/g entries, standard DMA boundary. */
303 static struct scsi_host_template nv_sht = {
304 .module = THIS_MODULE,
306 .ioctl = ata_scsi_ioctl,
307 .queuecommand = ata_scsi_queuecmd,
308 .can_queue = ATA_DEF_QUEUE,
309 .this_id = ATA_SHT_THIS_ID,
310 .sg_tablesize = LIBATA_MAX_PRD,
311 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
312 .emulated = ATA_SHT_EMULATED,
313 .use_clustering = ATA_SHT_USE_CLUSTERING,
314 .proc_name = DRV_NAME,
315 .dma_boundary = ATA_DMA_BOUNDARY,
316 .slave_configure = ata_scsi_slave_config,
317 .slave_destroy = ata_scsi_slave_destroy,
318 .bios_param = ata_std_bios_param,
319 .suspend = ata_scsi_device_suspend,
320 .resume = ata_scsi_device_resume,
/* SCSI host template for the ADMA path: deeper queue (one per CPB),
   larger s/g table, 32-bit segment boundary, and a custom slave_configure
   that restricts DMA when an ATAPI device is attached. */
323 static struct scsi_host_template nv_adma_sht = {
324 .module = THIS_MODULE,
326 .ioctl = ata_scsi_ioctl,
327 .queuecommand = ata_scsi_queuecmd,
328 .can_queue = NV_ADMA_MAX_CPBS,
329 .this_id = ATA_SHT_THIS_ID,
330 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
331 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
332 .emulated = ATA_SHT_EMULATED,
333 .use_clustering = ATA_SHT_USE_CLUSTERING,
334 .proc_name = DRV_NAME,
335 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
336 .slave_configure = nv_adma_slave_config,
337 .slave_destroy = ata_scsi_slave_destroy,
338 .bios_param = ata_std_bios_param,
339 .suspend = ata_scsi_device_suspend,
340 .resume = ata_scsi_device_resume,
/* Port ops for GENERIC boards: entirely standard BMDMA with NV SCR access
   and the NV error handler. */
343 static const struct ata_port_operations nv_generic_ops = {
344 .port_disable = ata_port_disable,
345 .tf_load = ata_tf_load,
346 .tf_read = ata_tf_read,
347 .exec_command = ata_exec_command,
348 .check_status = ata_check_status,
349 .dev_select = ata_std_dev_select,
350 .bmdma_setup = ata_bmdma_setup,
351 .bmdma_start = ata_bmdma_start,
352 .bmdma_stop = ata_bmdma_stop,
353 .bmdma_status = ata_bmdma_status,
354 .qc_prep = ata_qc_prep,
355 .qc_issue = ata_qc_issue_prot,
356 .freeze = ata_bmdma_freeze,
357 .thaw = ata_bmdma_thaw,
358 .error_handler = nv_error_handler,
359 .post_internal_cmd = ata_bmdma_post_internal_cmd,
360 .data_xfer = ata_data_xfer,
361 .irq_handler = nv_generic_interrupt,
362 .irq_clear = ata_bmdma_irq_clear,
363 .irq_on = ata_irq_on,
364 .irq_ack = ata_irq_ack,
365 .scr_read = nv_scr_read,
366 .scr_write = nv_scr_write,
367 .port_start = ata_port_start,
/* Port ops for nForce2/3: as generic, but with NF2-specific freeze/thaw
   and interrupt handler (NF2 exposes its own hotplug interrupt bits).
   NOTE(review): the .thaw entry appears to have been lost in extraction
   (freeze is nv_nf2_freeze; expect a matching nv_nf2_thaw). */
370 static const struct ata_port_operations nv_nf2_ops = {
371 .port_disable = ata_port_disable,
372 .tf_load = ata_tf_load,
373 .tf_read = ata_tf_read,
374 .exec_command = ata_exec_command,
375 .check_status = ata_check_status,
376 .dev_select = ata_std_dev_select,
377 .bmdma_setup = ata_bmdma_setup,
378 .bmdma_start = ata_bmdma_start,
379 .bmdma_stop = ata_bmdma_stop,
380 .bmdma_status = ata_bmdma_status,
381 .qc_prep = ata_qc_prep,
382 .qc_issue = ata_qc_issue_prot,
383 .freeze = nv_nf2_freeze,
385 .error_handler = nv_error_handler,
386 .post_internal_cmd = ata_bmdma_post_internal_cmd,
387 .data_xfer = ata_data_xfer,
388 .irq_handler = nv_nf2_interrupt,
389 .irq_clear = ata_bmdma_irq_clear,
390 .irq_on = ata_irq_on,
391 .irq_ack = ata_irq_ack,
392 .scr_read = nv_scr_read,
393 .scr_write = nv_scr_write,
394 .port_start = ata_port_start,
/* Port ops for CK804/MCP04 in legacy mode: CK804-specific freeze/thaw,
   interrupt handler, and a host_stop that restores SATA config space. */
397 static const struct ata_port_operations nv_ck804_ops = {
398 .port_disable = ata_port_disable,
399 .tf_load = ata_tf_load,
400 .tf_read = ata_tf_read,
401 .exec_command = ata_exec_command,
402 .check_status = ata_check_status,
403 .dev_select = ata_std_dev_select,
404 .bmdma_setup = ata_bmdma_setup,
405 .bmdma_start = ata_bmdma_start,
406 .bmdma_stop = ata_bmdma_stop,
407 .bmdma_status = ata_bmdma_status,
408 .qc_prep = ata_qc_prep,
409 .qc_issue = ata_qc_issue_prot,
410 .freeze = nv_ck804_freeze,
411 .thaw = nv_ck804_thaw,
412 .error_handler = nv_error_handler,
413 .post_internal_cmd = ata_bmdma_post_internal_cmd,
414 .irq_handler = nv_ck804_interrupt,
/* NOTE(review): .data_xfer line above retained as-is from source */
415 .irq_handler = nv_ck804_interrupt,
416 .irq_clear = ata_bmdma_irq_clear,
417 .irq_on = ata_irq_on,
418 .irq_ack = ata_irq_ack,
419 .scr_read = nv_scr_read,
420 .scr_write = nv_scr_write,
421 .port_start = ata_port_start,
422 .host_stop = nv_ck804_host_stop,
/* Port ops for CK804/MCP04 in ADMA mode: ADMA qc_prep/qc_issue/interrupt,
   ATAPI-DMA gating, ADMA-aware EH, and port start/stop/suspend/resume
   that manage the ADMA engine state. */
425 static const struct ata_port_operations nv_adma_ops = {
426 .port_disable = ata_port_disable,
427 .tf_load = ata_tf_load,
428 .tf_read = ata_tf_read,
429 .check_atapi_dma = nv_adma_check_atapi_dma,
430 .exec_command = ata_exec_command,
431 .check_status = ata_check_status,
432 .dev_select = ata_std_dev_select,
433 .bmdma_setup = ata_bmdma_setup,
434 .bmdma_start = ata_bmdma_start,
435 .bmdma_stop = ata_bmdma_stop,
436 .bmdma_status = ata_bmdma_status,
437 .qc_prep = nv_adma_qc_prep,
438 .qc_issue = nv_adma_qc_issue,
439 .freeze = nv_ck804_freeze,
440 .thaw = nv_ck804_thaw,
441 .error_handler = nv_adma_error_handler,
442 .post_internal_cmd = nv_adma_post_internal_cmd,
443 .data_xfer = ata_data_xfer,
444 .irq_handler = nv_adma_interrupt,
445 .irq_clear = nv_adma_irq_clear,
446 .irq_on = ata_irq_on,
447 .irq_ack = ata_irq_ack,
448 .scr_read = nv_scr_read,
449 .scr_write = nv_scr_write,
450 .port_start = nv_adma_port_start,
451 .port_stop = nv_adma_port_stop,
452 .port_suspend = nv_adma_port_suspend,
453 .port_resume = nv_adma_port_resume,
454 .host_stop = nv_adma_host_stop,
/* Per-board ata_port_info, indexed by board type (GENERIC, NFORCE2/3,
   CK804, ADMA). Only the ADMA entry adds ATA_FLAG_MMIO | ATA_FLAG_NCQ. */
457 static struct ata_port_info nv_port_info[] = {
461 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
462 ATA_FLAG_HRST_TO_RESUME,
463 .pio_mask = NV_PIO_MASK,
464 .mwdma_mask = NV_MWDMA_MASK,
465 .udma_mask = NV_UDMA_MASK,
466 .port_ops = &nv_generic_ops,
471 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
472 ATA_FLAG_HRST_TO_RESUME,
473 .pio_mask = NV_PIO_MASK,
474 .mwdma_mask = NV_MWDMA_MASK,
475 .udma_mask = NV_UDMA_MASK,
476 .port_ops = &nv_nf2_ops,
481 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
482 ATA_FLAG_HRST_TO_RESUME,
483 .pio_mask = NV_PIO_MASK,
484 .mwdma_mask = NV_MWDMA_MASK,
485 .udma_mask = NV_UDMA_MASK,
486 .port_ops = &nv_ck804_ops,
491 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
492 ATA_FLAG_HRST_TO_RESUME |
493 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
494 .pio_mask = NV_PIO_MASK,
495 .mwdma_mask = NV_MWDMA_MASK,
496 .udma_mask = NV_UDMA_MASK,
497 .port_ops = &nv_adma_ops,
/* Module metadata and the module parameter that gates ADMA use. */
501 MODULE_AUTHOR("NVIDIA");
502 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
503 MODULE_LICENSE("GPL");
504 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
505 MODULE_VERSION(DRV_VERSION);
/* ADMA enabled by default; presumably exported via module_param elsewhere
   in the file — confirm against the full source. */
507 static int adma_enabled = 1;
/* Switch the port from ADMA mode back to ATA register (legacy) mode.
   Waits (up to 20 polls) for the engine to go IDLE, clears the GO bit,
   then waits for the LEGACY status bit before marking the port as being
   in register mode. Warnings are printed on either timeout. */
509 static void nv_adma_register_mode(struct ata_port *ap)
511 struct nv_adma_port_priv *pp = ap->private_data;
512 void __iomem *mmio = pp->ctl_block;
/* already in register mode - nothing to do */
516 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
519 status = readw(mmio + NV_ADMA_STAT);
520 while(!(status & NV_ADMA_STAT_IDLE) && count < 20) {
522 status = readw(mmio + NV_ADMA_STAT);
526 ata_port_printk(ap, KERN_WARNING,
527 "timeout waiting for ADMA IDLE, stat=0x%hx\n",
/* clear GO to stop the ADMA engine */
530 tmp = readw(mmio + NV_ADMA_CTL);
531 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
534 status = readw(mmio + NV_ADMA_STAT);
535 while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
537 status = readw(mmio + NV_ADMA_STAT);
541 ata_port_printk(ap, KERN_WARNING,
542 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
545 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
/* Switch the port from register mode into ADMA mode: set GO and wait
   (up to 20 polls) for LEGACY to clear and IDLE to assert. Must not be
   called once ATAPI setup has completed (ADMA is disabled for ATAPI). */
548 static void nv_adma_mode(struct ata_port *ap)
550 struct nv_adma_port_priv *pp = ap->private_data;
551 void __iomem *mmio = pp->ctl_block;
/* already in ADMA mode - nothing to do */
555 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
558 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
560 tmp = readw(mmio + NV_ADMA_CTL);
561 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
563 status = readw(mmio + NV_ADMA_STAT);
564 while(((status & NV_ADMA_STAT_LEGACY) ||
565 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
567 status = readw(mmio + NV_ADMA_STAT);
571 ata_port_printk(ap, KERN_WARNING,
572 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
575 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
/* Configure a SCSI device attached to an ADMA port.
 *
 * For ATAPI devices ADMA cannot be used, so the request queue is
 * restricted to the legacy interface's limits (32-bit bounce limit,
 * standard DMA boundary, LIBATA_MAX_PRD-1 s/g entries) and the port is
 * forced back to register mode; ADMA is also disabled in PCI config
 * register 0x50 for this port. For regular ATA devices the full ADMA
 * limits are (re)applied and ADMA is (re)enabled in config space.
 * Returns the result of ata_scsi_slave_config() (rc path elided here).
 */
578 static int nv_adma_slave_config(struct scsi_device *sdev)
580 struct ata_port *ap = ata_shost_to_port(sdev->host);
581 struct nv_adma_port_priv *pp = ap->private_data;
582 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
584 unsigned long segment_boundary;
585 unsigned short sg_tablesize;
588 u32 current_reg, new_reg, config_mask;
590 rc = ata_scsi_slave_config(sdev);
592 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
593 /* Not a proper libata device, ignore */
596 if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
598 * NVIDIA reports that ADMA mode does not support ATAPI commands.
599 * Therefore ATAPI commands are sent through the legacy interface.
600 * However, the legacy interface only supports 32-bit DMA.
601 * Restrict DMA parameters as required by the legacy interface
602 * when an ATAPI device is connected.
604 bounce_limit = ATA_DMA_MASK;
605 segment_boundary = ATA_DMA_BOUNDARY;
606 /* Subtract 1 since an extra entry may be needed for padding, see
608 sg_tablesize = LIBATA_MAX_PRD - 1;
610 /* Since the legacy DMA engine is in use, we need to disable ADMA
613 nv_adma_register_mode(ap);
616 bounce_limit = *ap->dev->dma_mask;
617 segment_boundary = NV_ADMA_DMA_BOUNDARY;
618 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
/* FIX: source had mojibake "¤t_reg" (mangled "&current_reg") */
622 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
/* select the enable bits for whichever port this is */
625 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
626 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
628 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
629 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
/* ATA device: enable ADMA; ATAPI device: disable it */
632 new_reg = current_reg | config_mask;
633 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
636 new_reg = current_reg & ~config_mask;
637 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
/* avoid a config-space write when nothing changed */
640 if(current_reg != new_reg)
641 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
643 blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
644 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
645 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
646 ata_port_printk(ap, KERN_INFO,
647 "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
648 (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
/* libata check_atapi_dma hook: allow ATAPI DMA only once the ATAPI setup
   in nv_adma_slave_config has completed (non-zero return = no DMA). */
652 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
654 struct nv_adma_port_priv *pp = qc->ap->private_data;
655 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
/* Encode an ATA taskfile into the CPB's 16-bit register-write entries:
   high byte = register id (plus WNB on the first entry, CMDEND on the
   command entry), low byte = value. Unused slots are filled with IGN.
   Returns the number of entries written (idx). */
658 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
660 unsigned int idx = 0;
662 if(tf->flags & ATA_TFLAG_ISADDR) {
663 if (tf->flags & ATA_TFLAG_LBA48) {
/* LBA48: write the high-order bytes first */
664 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
665 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
666 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
667 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
668 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
669 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
671 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
673 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
674 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
675 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
676 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
679 if(tf->flags & ATA_TFLAG_DEVICE)
680 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
/* command register entry terminates the list */
682 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
/* pad remaining slots with IGN (loop header elided in this view) */
685 cpb[idx++] = cpu_to_le16(IGN);
/* Examine one CPB's response flags after a notifier fired.
 * On any error flag (or force_err from the error notifier) records an
 * EH description, sets the appropriate err_mask and kicks error handling;
 * on DONE completes the matching qc (sampling ATA status only for
 * non-NCQ commands, since for NCQ the current status is unrelated).
 * A notifier with no active command raises an HSM violation. */
690 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
692 struct nv_adma_port_priv *pp = ap->private_data;
693 u8 flags = pp->cpb[cpb_num].resp_flags;
695 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
697 if (unlikely((force_err ||
698 flags & (NV_CPB_RESP_ATA_ERR |
699 NV_CPB_RESP_CMD_ERR |
700 NV_CPB_RESP_CPB_ERR)))) {
701 struct ata_eh_info *ehi = &ap->eh_info;
704 ata_ehi_clear_desc(ehi);
705 ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags );
706 if (flags & NV_CPB_RESP_ATA_ERR) {
707 ata_ehi_push_desc(ehi, ": ATA error");
708 ehi->err_mask |= AC_ERR_DEV;
709 } else if (flags & NV_CPB_RESP_CMD_ERR) {
710 ata_ehi_push_desc(ehi, ": CMD error");
711 ehi->err_mask |= AC_ERR_DEV;
712 } else if (flags & NV_CPB_RESP_CPB_ERR) {
713 ata_ehi_push_desc(ehi, ": CPB error");
714 ehi->err_mask |= AC_ERR_SYSTEM;
717 /* notifier error, but no error in CPB flags? */
718 ehi->err_mask |= AC_ERR_OTHER;
721 /* Kill all commands. EH will determine what actually failed. */
729 if (flags & NV_CPB_RESP_DONE) {
730 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
731 VPRINTK("CPB flags done, flags=0x%x\n", flags);
733 /* Grab the ATA port status for non-NCQ commands.
734 For NCQ commands the current status may have nothing to do with
735 the command just completed. */
736 if (qc->tf.protocol != ATA_PROT_NCQ) {
737 u8 ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));
738 qc->err_mask |= ac_err_mask(ata_status);
740 DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
744 struct ata_eh_info *ehi = &ap->eh_info;
745 /* Notifier bits set without a command may indicate the drive
746 is misbehaving. Raise host state machine violation on this
748 ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
750 ehi->err_mask |= AC_ERR_HSM;
751 ehi->action |= ATA_EH_SOFTRESET;
/* Handle a legacy (register-mode) interrupt for one port: freeze on
   hotplug/unplug events, ignore interrupts not flagged as ours, clear a
   spurious DEV interrupt by reading status when no qc is active (or the
   qc is polled), otherwise hand off to ata_host_intr(). */
759 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
761 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
763 /* freeze if hotplugged */
764 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
769 /* bail out if not our interrupt */
770 if (!(irq_stat & NV_INT_DEV))
773 /* DEV interrupt w/ no active qc? */
774 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
775 ata_check_status(ap);
779 /* handle interrupt */
780 return ata_host_intr(ap, qc);
/* Main ADMA interrupt handler. For each enabled port: in register mode
 * delegate to nv_host_intr(); in ADMA mode read the notifier registers,
 * clear ADMA status (flushing the posted write so later completions
 * re-interrupt), freeze on hotplug/timeout/SError, then walk the
 * notifier bits completing CPBs via nv_adma_check_cpb(). Both ports'
 * notifier-clear registers are written at the end, as required by
 * NVIDIA, whenever either has bits to clear. Runs under host->lock. */
783 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
785 struct ata_host *host = dev_instance;
787 u32 notifier_clears[2];
789 spin_lock(&host->lock);
791 for (i = 0; i < host->n_ports; i++) {
792 struct ata_port *ap = host->ports[i];
793 notifier_clears[i] = 0;
795 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
796 struct nv_adma_port_priv *pp = ap->private_data;
797 void __iomem *mmio = pp->ctl_block;
800 u32 notifier, notifier_error;
802 /* if in ATA register mode, use standard ata interrupt handler */
803 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
804 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
805 >> (NV_INT_PORT_SHIFT * i);
806 if(ata_tag_valid(ap->active_tag))
807 /** NV_INT_DEV indication seems unreliable at times
808 at least in ADMA mode. Force it on always when a
809 command is active, to prevent losing interrupts. */
810 irq_stat |= NV_INT_DEV;
811 handled += nv_host_intr(ap, irq_stat);
815 notifier = readl(mmio + NV_ADMA_NOTIFIER);
816 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
817 notifier_clears[i] = notifier | notifier_error;
819 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
/* nothing pending for this port - skip it */
821 if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
826 status = readw(mmio + NV_ADMA_STAT);
828 /* Clear status. Ensure the controller sees the clearing before we start
829 looking at any of the CPB statuses, so that any CPB completions after
830 this point in the handler will raise another interrupt. */
831 writew(status, mmio + NV_ADMA_STAT);
832 readw(mmio + NV_ADMA_STAT); /* flush posted write */
835 handled++; /* irq handled if we got here */
837 /* freeze if hotplugged or controller error */
838 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
839 NV_ADMA_STAT_HOTUNPLUG |
840 NV_ADMA_STAT_TIMEOUT |
841 NV_ADMA_STAT_SERROR))) {
842 struct ata_eh_info *ehi = &ap->eh_info;
844 ata_ehi_clear_desc(ehi);
845 ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status );
846 if (status & NV_ADMA_STAT_TIMEOUT) {
847 ehi->err_mask |= AC_ERR_SYSTEM;
848 ata_ehi_push_desc(ehi, ": timeout");
849 } else if (status & NV_ADMA_STAT_HOTPLUG) {
850 ata_ehi_hotplugged(ehi);
851 ata_ehi_push_desc(ehi, ": hotplug");
852 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
853 ata_ehi_hotplugged(ehi);
854 ata_ehi_push_desc(ehi, ": hot unplug");
855 } else if (status & NV_ADMA_STAT_SERROR) {
856 /* let libata analyze SError and figure out the cause */
857 ata_ehi_push_desc(ehi, ": SError");
863 if (status & (NV_ADMA_STAT_DONE |
864 NV_ADMA_STAT_CPBERR)) {
865 u32 check_commands = notifier | notifier_error;
867 /** Check CPBs for completed commands */
868 while ((pos = ffs(check_commands)) && !error) {
870 error = nv_adma_check_cpb(ap, pos,
871 notifier_error & (1 << pos) );
872 check_commands &= ~(1 << pos );
878 if(notifier_clears[0] || notifier_clears[1]) {
879 /* Note: Both notifier clear registers must be written
880 if either is set, even if one is zero, according to NVIDIA. */
881 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
882 writel(notifier_clears[0], pp->notifier_clear_block);
883 pp = host->ports[1]->private_data;
884 writel(notifier_clears[1], pp->notifier_clear_block);
887 spin_unlock(&host->lock);
889 return IRQ_RETVAL(handled);
/* irq_clear hook: acknowledge everything — write back ADMA status,
   clear both notifier registers, and clear the legacy BMDMA status
   by writing back what was read. */
892 static void nv_adma_irq_clear(struct ata_port *ap)
894 struct nv_adma_port_priv *pp = ap->private_data;
895 void __iomem *mmio = pp->ctl_block;
896 u16 status = readw(mmio + NV_ADMA_STAT);
897 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
898 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
899 void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
901 /* clear ADMA status */
902 writew(status, mmio + NV_ADMA_STAT);
903 writel(notifier | notifier_error,
904 pp->notifier_clear_block);
906 /** clear legacy status */
907 iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
/* post_internal_cmd hook: only run the BMDMA cleanup when the port is in
   register (legacy) mode; in ADMA mode there is no BMDMA state to clean. */
910 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
912 struct nv_adma_port_priv *pp = qc->ap->private_data;
914 if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
915 ata_bmdma_post_internal_cmd(qc);
/* port_start hook for ADMA: allocate per-port state and one coherent DMA
 * region holding 32 CPBs followed by 32 external APRD tables, map the
 * port's BAR5 register blocks, program the CPB base address, clear any
 * stale status, and bring the channel up in register mode with
 * interrupts enabled (including a channel reset pulse).
 * Uses devm/dmam allocations, so cleanup is automatic on teardown. */
918 static int nv_adma_port_start(struct ata_port *ap)
920 struct device *dev = ap->host->dev;
921 struct nv_adma_port_priv *pp;
930 rc = ata_port_start(ap);
934 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
938 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
939 ap->port_no * NV_ADMA_PORT_SIZE;
940 pp->ctl_block = mmio;
941 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
942 pp->notifier_clear_block = pp->gen_block +
943 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
945 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
946 &mem_dma, GFP_KERNEL);
949 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
952 * First item in chunk of DMA memory:
953 * 128-byte command parameter block (CPB)
954 * one for each command tag
957 pp->cpb_dma = mem_dma;
959 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
/* (mem_dma >> 16) >> 16 avoids a 64-bit shift warning on 32-bit dma_addr_t */
960 writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
962 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
963 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
966 * Second item: block of ADMA_SGTBL_LEN s/g entries
969 pp->aprd_dma = mem_dma;
971 ap->private_data = pp;
973 /* clear any outstanding interrupt conditions */
974 writew(0xffff, mmio + NV_ADMA_STAT);
976 /* initialize port variables */
977 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
979 /* clear CPB fetch count */
980 writew(0, mmio + NV_ADMA_CPB_COUNT);
982 /* clear GO for register mode, enable interrupt */
983 tmp = readw(mmio + NV_ADMA_CTL);
984 writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
985 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
/* pulse channel reset: set, flush, clear, flush */
987 tmp = readw(mmio + NV_ADMA_CTL);
988 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
989 readw( mmio + NV_ADMA_CTL ); /* flush posted write */
991 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
992 readw( mmio + NV_ADMA_CTL ); /* flush posted write */
/* port_stop hook: shut the ADMA channel down by clearing the control
   register (memory was devm/dmam-allocated, so no explicit frees). */
997 static void nv_adma_port_stop(struct ata_port *ap)
999 struct nv_adma_port_priv *pp = ap->private_data;
1000 void __iomem *mmio = pp->ctl_block;
1003 writew(0, mmio + NV_ADMA_CTL);
/* port_suspend hook: drop to register mode (clears GO), zero the CPB
   fetch count, and disable the channel/interrupts before sleep. */
1006 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1008 struct nv_adma_port_priv *pp = ap->private_data;
1009 void __iomem *mmio = pp->ctl_block;
1011 /* Go to register mode - clears GO */
1012 nv_adma_register_mode(ap);
1014 /* clear CPB fetch count */
1015 writew(0, mmio + NV_ADMA_CPB_COUNT);
1017 /* disable interrupt, shut down port */
1018 writew(0, mmio + NV_ADMA_CTL);
/* port_resume hook: reprogram the CPB base address and redo the same
   bring-up sequence as port_start (clear status, register mode, clear
   fetch count, enable interrupts, pulse channel reset). */
1023 static int nv_adma_port_resume(struct ata_port *ap)
1025 struct nv_adma_port_priv *pp = ap->private_data;
1026 void __iomem *mmio = pp->ctl_block;
1029 /* set CPB block location */
1030 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1031 writel((pp->cpb_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1033 /* clear any outstanding interrupt conditions */
1034 writew(0xffff, mmio + NV_ADMA_STAT);
1036 /* initialize port variables */
1037 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1039 /* clear CPB fetch count */
1040 writew(0, mmio + NV_ADMA_CPB_COUNT);
1042 /* clear GO for register mode, enable interrupt */
1043 tmp = readw(mmio + NV_ADMA_CTL);
1044 writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1045 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1047 tmp = readw(mmio + NV_ADMA_CTL);
1048 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1049 readw( mmio + NV_ADMA_CTL ); /* flush posted write */
1051 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1052 readw( mmio + NV_ADMA_CTL ); /* flush posted write */
/* Point a port's taskfile I/O addresses at the MMIO shadow registers in
   its ADMA register block (each ATA register at a 4-byte stride, control
   at offset 0x20), so legacy accesses work through BAR5. */
1057 static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
1059 void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
1060 struct ata_ioports *ioport = &probe_ent->port[port];
1064 mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;
1066 ioport->cmd_addr = mmio;
1067 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
1068 ioport->error_addr =
1069 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1070 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1071 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1072 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1073 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1074 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
1075 ioport->status_addr =
1076 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
1077 ioport->altstatus_addr =
1078 ioport->ctl_addr = mmio + 0x20;
/* Host-level ADMA init: enable ADMA for both ports in PCI config register
   0x50, then set up each port's MMIO taskfile addresses. */
1081 static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
1083 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1089 /* enable ADMA on the ports */
1090 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1091 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1092 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1093 NV_MCP_SATA_CFG_20_PORT1_EN |
1094 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1096 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1098 for (i = 0; i < probe_ent->n_ports; i++)
1099 nv_adma_setup_port(probe_ent, i);
/* Fill one APRD (s/g descriptor) from a scatterlist entry: direction
   flag from the taskfile, END on the last element, CONT otherwise
   (the idx parameter line is elided in this view). */
1104 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1105 struct scatterlist *sg,
1107 struct nv_adma_prd *aprd)
1110 if (qc->tf.flags & ATA_TFLAG_WRITE)
1111 flags |= NV_APRD_WRITE;
1112 if (idx == qc->n_elem - 1)
1113 flags |= NV_APRD_END;
1115 flags |= NV_APRD_CONT;
1117 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1118 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1119 aprd->flags = flags;
1120 aprd->packet_len = 0;
/*
 * Build the ADMA PRD table for a command.  The first five entries live
 * inline in the CPB itself; entries past that spill into the per-tag
 * slice of the external PRD table (pp->aprd), which cpb->next_aprd then
 * points at via its DMA address.
 *
 * NOTE(review): the if/else around the two next_aprd assignments
 * (lines 1139/1141) is missing from this extract — presumably next_aprd
 * is zeroed when all entries fit inline; confirm against full source.
 */
1123 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1125 struct nv_adma_port_priv *pp = qc->ap->private_data;
1127 struct nv_adma_prd *aprd;
1128 struct scatterlist *sg;
1134 ata_for_each_sg(sg, qc) {
/* first 5 APRDs are inline in the CPB; the rest go to the spill table */
1135 aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
1136 nv_adma_fill_aprd(qc, sg, idx, aprd);
1140 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1142 cpb->next_aprd = cpu_to_le64(0);
/*
 * Decide whether a command should bypass the ADMA engine and be issued
 * through the legacy register-mode interface.  ATAPI-configured ports
 * and polled commands cannot use ADMA; DMA-mapped or interrupt-driven
 * no-data commands can.
 *
 * NOTE(review): the return statements (lines 1153/1157/1159) are missing
 * from this extract, so the exact truth values cannot be confirmed here.
 */
1145 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1147 struct nv_adma_port_priv *pp = qc->ap->private_data;
1149 /* ADMA engine can only be used for non-ATAPI DMA commands,
1150 or interrupt-driven no-data commands. */
1151 if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1152 (qc->tf.flags & ATA_TFLAG_POLLING))
1155 if((qc->flags & ATA_QCFLAG_DMAMAP) ||
1156 (qc->tf.protocol == ATA_PROT_NODATA))
/*
 * Build the CPB (command parameter block) for a queued command.
 * Register-mode commands are diverted early; otherwise the taskfile,
 * NCQ flags and scatter/gather table are filled in.  CPB_VALID is set
 * only at the very end so the controller never sees a half-built CPB.
 */
1162 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1164 struct nv_adma_port_priv *pp = qc->ap->private_data;
1165 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1166 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
/* commands that can't use ADMA fall back to the legacy path */
1169 if (nv_adma_use_reg_mode(qc)) {
1170 nv_adma_register_mode(qc->ap);
1175 cpb->resp_flags = NV_CPB_RESP_DONE;
1182 cpb->next_cpb_idx = 0;
1184 /* turn on NCQ flags for NCQ commands */
1185 if (qc->tf.protocol == ATA_PROT_NCQ)
1186 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1188 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1190 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1192 if(qc->flags & ATA_QCFLAG_DMAMAP) {
1193 nv_adma_fill_sg(qc, cpb);
1194 ctl_flags |= NV_CPB_CTL_APRD_VALID;
/* non-DMA command: clear the inline APRD slots instead */
1196 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1198 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
1199 finished filling in all of the contents */
1201 cpb->ctl_flags = ctl_flags;
1203 cpb->resp_flags = 0;
/*
 * Issue a prepared command.  Register-mode commands go through the
 * standard libata issue path; ADMA commands are kicked off by writing
 * the command tag to the APPEND register.
 *
 * NOTE(review): the delay call inside the NCQ/non-NCQ switch branch
 * (line ~1229) is missing from this extract.
 */
1206 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1208 struct nv_adma_port_priv *pp = qc->ap->private_data;
1209 void __iomem *mmio = pp->ctl_block;
1210 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1214 if (nv_adma_use_reg_mode(qc)) {
1215 /* use ATA register mode */
1216 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1217 nv_adma_register_mode(qc->ap);
1218 return ata_qc_issue_prot(qc);
1220 nv_adma_mode(qc->ap);
1222 /* write append register, command tag in lower 8 bits
1223 and (number of cpbs to append -1) in top 8 bits */
1226 if(curr_ncq != pp->last_issue_ncq) {
1227 /* Seems to need some delay before switching between NCQ and non-NCQ
1228 commands, else we get command timeouts and such. */
1230 pp->last_issue_ncq = curr_ncq;
1233 writew(qc->tag, mmio + NV_ADMA_APPEND);
1235 DPRINTK("Issued tag %u\n",qc->tag);
/*
 * IRQ handler for the generic (pre-nForce2) flavor: walk every port
 * under the host lock and dispatch the active non-polled command to
 * ata_host_intr(); with no command pending, read the status register
 * so any stray interrupt is acknowledged.
 */
1240 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1242 struct ata_host *host = dev_instance;
1244 unsigned int handled = 0;
1245 unsigned long flags;
1247 spin_lock_irqsave(&host->lock, flags);
1249 for (i = 0; i < host->n_ports; i++) {
1250 struct ata_port *ap;
1252 ap = host->ports[i];
1254 !(ap->flags & ATA_FLAG_DISABLED)) {
1255 struct ata_queued_cmd *qc;
1257 qc = ata_qc_from_tag(ap, ap->active_tag);
/* polled commands are completed by the polling path, not here */
1258 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1259 handled += ata_host_intr(ap, qc);
1261 // No request pending? Clear interrupt status
1262 // anyway, in case there's one pending.
1263 ap->ops->check_status(ap);
1268 spin_unlock_irqrestore(&host->lock, flags);
1270 return IRQ_RETVAL(handled);
/*
 * Shared interrupt core for nForce2/CK804: the controller packs per-port
 * status into one byte, NV_INT_PORT_SHIFT bits per port.  Hand each
 * enabled port its slice of irq_stat, shifting as we go.
 */
1273 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1277 for (i = 0; i < host->n_ports; i++) {
1278 struct ata_port *ap = host->ports[i];
1280 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1281 handled += nv_host_intr(ap, irq_stat);
/* move the next port's status bits into the low positions */
1283 irq_stat >>= NV_INT_PORT_SHIFT;
1286 return IRQ_RETVAL(handled);
/*
 * nForce2/3 IRQ handler: read the packed interrupt status byte from the
 * port-0 SCR register block, then delegate to nv_do_interrupt() under
 * the host lock.
 */
1289 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1291 struct ata_host *host = dev_instance;
1295 spin_lock(&host->lock);
1296 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1297 ret = nv_do_interrupt(host, irq_stat);
1298 spin_unlock(&host->lock);
/*
 * CK804/MCP04 IRQ handler: same as the nForce2 path, but the interrupt
 * status byte lives in MMIO space at NV_INT_STATUS_CK804 rather than in
 * the SCR register block.
 */
1303 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1305 struct ata_host *host = dev_instance;
1309 spin_lock(&host->lock);
1310 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1311 ret = nv_do_interrupt(host, irq_stat);
1312 spin_unlock(&host->lock);
/*
 * Read a SATA SCR register (registers are 4 bytes apart).
 * NOTE(review): the error-return for sc_reg > SCR_CONTROL (line ~1320)
 * is missing from this extract.
 */
1317 static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
1319 if (sc_reg > SCR_CONTROL)
1322 return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
/*
 * Write a SATA SCR register (registers are 4 bytes apart).
 * NOTE(review): the early-return for sc_reg > SCR_CONTROL (line ~1328)
 * is missing from this extract.
 */
1325 static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
1327 if (sc_reg > SCR_CONTROL)
1330 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
/*
 * EH freeze for nForce2/3: mask this port's interrupt bits in the shared
 * NV_INT_ENABLE register (reached via port 0's SCR block; each port owns
 * an NV_INT_PORT_SHIFT-wide bit field).
 */
1333 static void nv_nf2_freeze(struct ata_port *ap)
1335 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1336 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1339 mask = ioread8(scr_addr + NV_INT_ENABLE);
1340 mask &= ~(NV_INT_ALL << shift);
1341 iowrite8(mask, scr_addr + NV_INT_ENABLE);
/*
 * EH thaw for nForce2/3: acknowledge any latched interrupts for this
 * port by writing its bits to NV_INT_STATUS, then re-enable its
 * interrupt bits in NV_INT_ENABLE.
 */
1344 static void nv_nf2_thaw(struct ata_port *ap)
1346 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1347 int shift = ap->port_no * NV_INT_PORT_SHIFT;
/* write-to-clear any pending status bits for this port */
1350 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1352 mask = ioread8(scr_addr + NV_INT_ENABLE);
1353 mask |= (NV_INT_MASK << shift);
1354 iowrite8(mask, scr_addr + NV_INT_ENABLE);
/*
 * EH freeze for CK804/MCP04: same masking scheme as nForce2, but the
 * interrupt enable register lives in MMIO at NV_INT_ENABLE_CK804.
 */
1357 static void nv_ck804_freeze(struct ata_port *ap)
1359 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1360 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1363 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1364 mask &= ~(NV_INT_ALL << shift);
1365 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
/*
 * EH thaw for CK804/MCP04: clear any pending status for this port, then
 * re-enable its interrupt bits — the MMIO-register twin of nv_nf2_thaw().
 */
1368 static void nv_ck804_thaw(struct ata_port *ap)
1370 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1371 int shift = ap->port_no * NV_INT_PORT_SHIFT;
/* write-to-clear any pending status bits for this port */
1374 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1376 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1377 mask |= (NV_INT_MASK << shift);
1378 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
/*
 * Hardreset wrapper: run the standard SATA hardreset but discard the
 * device class (pass a dummy), since signature readout after hardreset
 * is unreliable on these controllers (see bug link below).
 */
1381 static int nv_hardreset(struct ata_port *ap, unsigned int *class)
1385 /* SATA hardreset fails to retrieve proper device signature on
1386 * some controllers. Don't classify on hardreset. For more
1387 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
/* classify into a local dummy, leaving *class untouched */
1389 return sata_std_hardreset(ap, &dummy);
/*
 * Standard BMDMA error handler using the stock libata reset chain, with
 * only hardreset overridden (nv_hardreset skips classification).
 */
1392 static void nv_error_handler(struct ata_port *ap)
1394 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1395 nv_hardreset, ata_std_postreset);
/*
 * ADMA-aware error handler.  If the port is still in ADMA mode: dump
 * controller and per-CPB state for any outstanding tags, drop back to
 * register mode, invalidate every CPB, clear the CPB fetch count, and
 * pulse CHANNEL_RESET on the ADMA control register.  Then run the
 * standard BMDMA EH with nv_hardreset.
 */
1398 static void nv_adma_error_handler(struct ata_port *ap)
1400 struct nv_adma_port_priv *pp = ap->private_data;
1401 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1402 void __iomem *mmio = pp->ctl_block;
/* only dump diagnostics when a command was actually outstanding */
1406 if(ata_tag_valid(ap->active_tag) || ap->sactive) {
1407 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1408 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1409 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1410 u32 status = readw(mmio + NV_ADMA_STAT);
1411 u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1412 u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1414 ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
1415 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1416 "next cpb count 0x%X next cpb idx 0x%x\n",
1417 notifier, notifier_error, gen_ctl, status,
1418 cpb_count, next_cpb_idx);
/* log the CPB state for every tag that was in flight */
1420 for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
1421 struct nv_adma_cpb *cpb = &pp->cpb[i];
1422 if( (ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
1423 ap->sactive & (1 << i) )
1424 ata_port_printk(ap, KERN_ERR,
1425 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1426 i, cpb->ctl_flags, cpb->resp_flags);
1430 /* Push us back into port register mode for error handling. */
1431 nv_adma_register_mode(ap);
1433 /* Mark all of the CPBs as invalid to prevent them from being executed */
1434 for( i=0;i<NV_ADMA_MAX_CPBS;i++)
1435 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1437 /* clear CPB fetch count */
1438 writew(0, mmio + NV_ADMA_CPB_COUNT);
/* pulse channel reset: assert, flush, deassert, flush */
1441 tmp = readw(mmio + NV_ADMA_CTL);
1442 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1443 readw( mmio + NV_ADMA_CTL ); /* flush posted write */
1445 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1446 readw( mmio + NV_ADMA_CTL ); /* flush posted write */
1449 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1450 nv_hardreset, ata_std_postreset);
/*
 * PCI probe: reject non-SATA (IDE) functions by BAR count, enable the
 * device, set DMA masks (64-bit when ADMA is usable, else ATA_DMA_MASK),
 * allocate host-private data, build the probe entry, map MMIO, set SCR
 * addresses, enable the CK804 SATA register space, and register the host.
 *
 * NOTE(review): many lines (declarations, error checks, returns) are
 * missing from this extract, and each line carries a stale line-number
 * prefix from extraction.
 */
1453 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1455 static int printed_version = 0;
1456 struct ata_port_info *ppi[2];
1457 struct ata_probe_ent *probe_ent;
1458 struct nv_host_priv *hpriv;
1462 unsigned long type = ent->driver_data;
1465 // Make sure this is a SATA controller by counting the number of bars
1466 // (NVIDIA SATA controllers will always have six bars). Otherwise,
1467 // it's an IDE controller and we ignore it.
1468 for (bar=0; bar<6; bar++)
1469 if (pci_resource_start(pdev, bar) == 0)
1472 if (!printed_version++)
1473 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1475 rc = pcim_enable_device(pdev);
1479 rc = pci_request_regions(pdev, DRV_NAME);
1481 pcim_pin_device(pdev);
/* ADMA-capable chips (CK804 and later) may use 64-bit DMA */
1485 if(type >= CK804 && adma_enabled) {
1486 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
1488 if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
1489 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1494 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1497 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1504 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1508 ppi[0] = ppi[1] = &nv_port_info[type];
1509 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
1513 if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
1515 probe_ent->iomap = pcim_iomap_table(pdev);
1517 probe_ent->private_data = hpriv;
1520 base = probe_ent->iomap[NV_MMIO_BAR];
1521 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
1522 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
1524 /* enable SATA space for CK804 */
1525 if (type >= CK804) {
/* NOTE(review): "®val" below is encoding-garbled — should be "&regval"
 * (an HTML "&reg;" artifact); fix before compiling. */
1528 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
1529 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1530 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1533 pci_set_master(pdev);
1536 rc = nv_adma_host_init(probe_ent);
1541 rc = ata_device_add(probe_ent);
/* probe_ent is only needed during probe; release it now */
1545 devm_kfree(&pdev->dev, probe_ent);
/*
 * PCI remove: delegate teardown to ata_pci_remove_one().
 * NOTE(review): lines after 1554 (e.g. freeing hpriv) are missing from
 * this extract; hpriv is fetched but its use is not visible here.
 */
1549 static void nv_remove_one (struct pci_dev *pdev)
1551 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1552 struct nv_host_priv *hpriv = host->private_data;
1554 ata_pci_remove_one(pdev);
/*
 * PCI resume: after the generic PCI resume, redo the chip setup that a
 * real suspend loses — re-enable the CK804 SATA register space, and on
 * ADMA chips re-enable/disable per-port ADMA depending on whether the
 * port was switched to ATAPI mode — then resume the libata host.
 */
1558 static int nv_pci_device_resume(struct pci_dev *pdev)
1560 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1561 struct nv_host_priv *hpriv = host->private_data;
1564 rc = ata_pci_device_do_resume(pdev);
/* config-space state only needs restoring after a real suspend */
1568 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
1569 if(hpriv->type >= CK804) {
/* NOTE(review): "®val" below is encoding-garbled — should be "&regval"
 * (an HTML "&reg;" artifact); fix before compiling. */
1572 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
1573 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1574 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1576 if(hpriv->type == ADMA) {
1578 struct nv_adma_port_priv *pp;
1579 /* enable/disable ADMA on the ports appropriately */
1580 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
/* port 0: ATAPI setup forces legacy mode, so keep ADMA off */
1582 pp = host->ports[0]->private_data;
1583 if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1584 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
1585 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
1587 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
1588 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
/* port 1: same decision as port 0 */
1589 pp = host->ports[1]->private_data;
1590 if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1591 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
1592 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1594 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
1595 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1597 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1601 ata_host_resume(host);
1606 static void nv_ck804_host_stop(struct ata_host *host)
1608 struct pci_dev *pdev = to_pci_dev(host->dev);
1611 /* disable SATA space for CK804 */
1612 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
1613 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1614 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
/*
 * Host-stop hook for the ADMA flavor: turn off ADMA and posted-write
 * bursting for both ports in NV_MCP_SATA_CFG_20, then run the CK804
 * host-stop to disable the SATA register space as well.
 */
1617 static void nv_adma_host_stop(struct ata_host *host)
1619 struct pci_dev *pdev = to_pci_dev(host->dev);
1622 /* disable ADMA on the ports */
1623 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1624 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
1625 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1626 NV_MCP_SATA_CFG_20_PORT1_EN |
1627 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1629 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1631 nv_ck804_host_stop(host);
/* Module entry point: register the PCI driver. */
1634 static int __init nv_init(void)
1636 return pci_register_driver(&nv_pci_driver);
/* Module exit point: unregister the PCI driver. */
1639 static void __exit nv_exit(void)
1641 pci_unregister_driver(&nv_pci_driver);
/* Module registration and the "adma" load-time parameter (read-only
 * once loaded, mode 0444) controlling whether ADMA mode is used. */
1644 module_init(nv_init);
1645 module_exit(nv_exit);
1646 module_param_named(adma, adma_enabled, bool, 0444);
1647 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");