/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.

  7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1), /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by port # */
	DONE_IRQ		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FIS_CFG_OFS		= 0x360,
	FIS_CFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CFG	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX |
				  /* temporary, until we fix hotplug: */
				  (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	GEN_II_NCQ_MAX_SECTORS	= 256,		/* max sects/io on Gen2 w/NCQ */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
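/* Illustrative sizing check (not from the original source): with
 * MV_MAX_SG_CT = 256 ePRD slots, .sg_tablesize = 128 means the SCSI
 * layer hands us at most 128 sg entries; even if every one of them
 * crosses a 64K boundary and mv_fill_sg() splits it into two ePRDs,
 * the result still fits within the 256-entry table.
 */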
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= sata_pmp_error_handler,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.qc_defer		= ata_std_qc_defer, /* FIS-based switching */
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
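
/* Usage sketch (illustrative, not from the original source): any write
 * that must reach the chip before the CPU proceeds goes through
 * writelfl(), e.g.:
 *
 *	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 *
 * The read-back forces the posted PCI write to complete; plain writel()
 * suffices when ordering against later MMIO accesses does not matter.
 */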

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
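
/* Worked example (illustrative): with MV_PORT_HC_SHIFT = 2 and
 * MV_PORT_MASK = 3, global port 5 lives on host controller
 * mv_hc_from_port(5) = 5 >> 2 = 1, as its local port
 * mv_hardport_from_port(5) = 5 & 3 = 1.
 */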

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
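
/* Register layout sketch (illustrative): the request ring has 32 slots,
 * so the in/out pointer registers encode both the queue base and the
 * slot index in one 32-bit word.  With EDMA_REQ_Q_PTR_SHIFT = 5 and
 * EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00, bits 31:10 hold the 1KB-aligned
 * base address (hence the WARN_ON on crqb_dma & 0x3ff) and bits 9:5
 * hold the 5-bit slot index; e.g. req_idx = 3 yields index = 3 << 5 =
 * 0x60.  The response ring works the same way with a shift of 3 and a
 * 256B-aligned base.
 */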

/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command being started
 *
 * If the eDMA engine is not already running, configure it for
 * the given protocol (stopping it first when switching between
 * NCQ and non-NCQ modes), clear any stale interrupt indicators,
 * and enable it, updating the cached MV_PP_FLAG_EDMA_EN to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hardport = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hardport);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ | DMA_IRQ) << hardport;
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

/**
 * mv_stop_edma_engine - Disable eDMA engine
 * @port_mmio: io base address
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
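
/* Timing note (illustrative): the loop above polls EDMA_CMD_OFS up to
 * 10000 times with a 10us delay, so the engine gets roughly 100ms to
 * acknowledge the disable before we give up and return -EIO.
 */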

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
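
/* Mapping sketch (illustrative): libata numbers these registers
 * SCR_STATUS = 0, SCR_ERROR = 1, SCR_CONTROL = 2, so the arithmetic
 * above places SStatus at 0x300, SError at 0x304, and SControl at
 * 0x308 -- matching the "ctrl, err regs follow status" comment on
 * SATA_STATUS_OFS.  Only SActive lives apart, at 0x350.
 */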

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 *
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
			ata_dev_printk(adev, KERN_INFO,
				"max_sectors limited to %u for NCQ\n",
				adev->max_sectors);
		}
	}
}

static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
{
	u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
	/*
	 * Various bit settings required for operation
	 * in FIS-based switching (fbs) mode on GenIIe:
	 */
	old_fcfg = readl(port_mmio + FIS_CFG_OFS);
	old_ltmode = readl(port_mmio + LTMODE_OFS);
	if (enable_fbs) {
		new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode | LTMODE_BIT8;
	} else { /* disable fbs */
		new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode & ~LTMODE_BIT8;
	}
	if (new_fcfg != old_fcfg)
		writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */

		if (want_ncq && sata_pmp_attached(ap)) {
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
			mv_config_fbs(port_mmio, 1);
		} else {
			mv_config_fbs(port_mmio, 0);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
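
/* Worked example (illustrative): an sg entry at bus address 0x1f8000
 * with length 0x10000 has offset = 0x8000 within its 64K segment, so
 * the loop above emits two ePRDs: one covering 0x1f8000-0x1fffff
 * (len 0x8000) and one covering 0x200000-0x207fff (len 0x8000).
 * This is why each sg entry may consume up to two slots of the
 * MV_MAX_SG_CT-entry table.
 */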

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
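
/* Packing sketch (illustrative, values hypothetical): each CRQB command
 * halfword carries the register value in bits 7:0, the register address
 * starting at bit CRQB_CMD_ADDR_SHIFT (8), the CS bits (0x2 << 11), and
 * the "last" marker in bit 15.  E.g. packing value 0xC8 with last = 1
 * gives 0xC8 | (addr << 8) | 0x1000 | 0x8000.
 */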

/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected queued command, or NULL
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which also performs a COMRESET.
 * The SERR case requires a clear of pending errors in the SATA
 * SERROR register.  Finally, if the port disabled DMA,
 * update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);

	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
1591
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

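/*
 * A note on the ring arithmetic in mv_intr_edma() above: the response
 * queue is a power-of-two ring (MV_MAX_Q_DEPTH entries, 32 in this
 * driver), so masking with MV_MAX_Q_DEPTH_MASK handles wraparound.
 * E.g. with resp_idx at 31, the next consumed entry is
 * (31 + 1) & 0x1f == 0, back at the start of the ring.
 */
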
/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host: host specific structure
 *	@relevant: port error bits relevant to this host controller
 *	@hc: which host controller we're to look at
 *
 *	Read then write clear the HC interrupt status then walk each
 *	port connected to the HC and see if it needs servicing.  Port
 *	success ints are reported in the HC interrupt status reg, the
 *	port error ints are reported in the higher level main
 *	interrupt status register and thus are passed in via the
 *	'relevant' argument.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hardport, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((ERR_IRQ << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hardport = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((DMA_IRQ << hardport) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hardport) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}

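/*
 * Bit layout recap for the per-port bits handled above: each port owns
 * two adjacent bits (error and done) in the main cause/mask registers,
 * at shift = port * 2, with bit 8 skipped for ports on the second HC.
 * E.g. port 1 uses bits 2-3, while port 5 gets shift 11, i.e. bits 11-12.
 */
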
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

/**
 *	mv_interrupt - Main interrupt event handler
 *	@irq: unused
 *	@dev_instance: private data; in this case the host structure
 *
 *	Read the read only register to determine if any host
 *	controllers have pending interrupts.  If so, call lower level
 *	routine to handle.  Also check for PCI errors which are only
 *	reported here.
 *
 *	LOCKING:
 *	This routine holds the host lock while processing pending
 *	interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 main_cause, main_mask;

	spin_lock(&host->lock);
	main_cause = readl(hpriv->main_cause_reg_addr);
	main_mask = readl(hpriv->main_mask_reg_addr);
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (!(main_cause & main_mask) || (main_cause == 0xffffffffU))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = main_cause & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

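/*
 * The offsets above follow from the libata SCR indices (SCR_STATUS = 0,
 * SCR_ERROR = 1, SCR_CONTROL = 2), so SStatus/SError/SControl live at
 * 0x0/0x4/0x8 from the mv5 phy base, one 32-bit word apiece.
 */
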
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 *	mv6_reset_hc - Perform the 6xxx global soft reset
 *	@mmio: base address of the HBA
 *
 *	This routine only applies to 6xxx parts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
	/*
	 * Temporary: wait 3 seconds before port-probing can happen,
	 * so that we don't miss finding sleepy SilXXXX port-multipliers.
	 * This can go away once hotplug is fully/correctly implemented.
	 */
	if (rc == 0)
		msleep(3000);
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}

#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);

	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifctl |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
}

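/*
 * Worked example of the transform above (illustrative value, not from
 * the datasheet): if SATA_INTERFACE_CFG reads back as 0x1a3, then
 * (0x1a3 & 0xf7f) clears bit 7 giving 0x123, OR-ing in 0x9b1000 yields
 * 0x9b1123, and with want_gen2i set, bit 7 is added back -> 0x9b11a3.
 */
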
/*
 * Caller must ensure that EDMA is not active,
 * by first doing mv_stop_edma() where needed.
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	mv_stop_edma_engine(port_mmio);
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed */
		mv_setup_ifctl(port_mmio, 1);
	}
	/*
	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
		}
	}
}

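/*
 * Note: the low nibble of SATA_IFCTL_OFS selects which port-multiplier
 * device port subsequent FISes are routed to; per the SATA PMP
 * convention, value 15 (0xf) addresses the PMP's own control port.
 */
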
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifctl(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}

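/*
 * For reference when reading the loop above: in SStatus, DET is bits 3:0,
 * SPD bits 7:4, and IPM bits 11:8.  So 0x113 and 0x123 mean an established
 * Gen1/Gen2 link, 0x0 means no device, and 0x121 (DET=1) means a device
 * was sensed but phy communication never came up, which is the stuck
 * state the FEr SATA#10 workaround retries out of.
 */
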
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	unsigned int shift;
	u32 main_mask;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	/* disable assertion of portN err, done events */
	main_mask = readl(hpriv->main_mask_reg_addr);
	main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned int shift, hc_port_no = ap->port_no;
	u32 main_mask, hc_irq_cause;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hc_port_no);
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	main_mask = readl(hpriv->main_mask_reg_addr);
	main_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}

/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

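/*
 * Layout sketch for the shadow register block set up above (derived from
 * the ATA_REG_* indices, one 32-bit word per register): data at
 * shd_base + 0x00, error/feature at +0x04, nsect at +0x08,
 * lbal/lbam/lbah at +0x0c/+0x10/+0x14, device at +0x18, and
 * status/command at +0x1c.
 */
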
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
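		/*
		 * chip_7042 otherwise shares its setup with chip_6042,
		 * so fall through to the chip_6042 handling below.
		 */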
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *	@board_idx: controller index
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = mmio + HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = mmio + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = mmio + HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = mmio + HC_SOC_MAIN_IRQ_MASK_OFS;
	}

	/* global interrupt mask: 0 == mask everything */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}

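/*
 * Note: the dmam_pool_create() calls above are the device-managed
 * (devres) variants, so the pools are released automatically when the
 * device is detached; that is why there is no explicit destroy path.
 */
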
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}

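/*
 * Worked example of the WINDOW_CTRL encoding above (illustrative values):
 * a 256 MB chip-select window has (size - 1) & 0xffff0000 == 0x0fff0000,
 * so with attribute 0xe and target id 0x0 the register word becomes
 * 0x0fff0000 | (0xe << 8) | (0x0 << 4) | 1 == 0x0fff0e01, i.e. size in
 * the top 16 bits, then attribute, target id, and the enable bit 0.
 */
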
/**
 *	mv_platform_probe - handle a positive probe of an SoC Marvell
 *	host
 *	@pdev: platform device found
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	hpriv->base -= MV_SATAHC0_REG_BASE;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}

/*
 *	mv_platform_remove - unplug a platform interface
 *	@pdev: platform device
 *
 *	A platform bus SATA device has been unplugged. Perform the needed
 *	cleanup. Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};

#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 *	mv_print_info - Dump key info to kernel log for perusal.
 *	@host: ATA host to print info about
 *
 *	FIXME: complete this.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 *	mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);