/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Originally written by Brett Russ.
 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */

/*
 * 80x1-B2 errata PCI#11:
 *
 * Users of the 6041/6081 Rev.B2 chips (current is C0)
 * should be careful to insert those cards only onto PCI-X bus #0,
 * and only in device slots 0..7, not higher.  The chips may not
 * work correctly otherwise (note: this is a pretty rare condition).
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.28"

/*
 * module options
 */

static int msi;
#ifdef CONFIG_PCI
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
		 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
		 "IRQ coalescing time threshold in usecs");

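/*
 * Usage example (illustrative, not part of the original source):
 * loading the driver with both coalescing thresholds set, so that a
 * completion IRQ is deferred until 4 I/Os have completed or ~100 usecs
 * have elapsed, whichever comes first:
 *
 *	modprobe sata_mv irq_coalescing_io_count=4 irq_coalescing_usecs=100
 *
 * If either parameter is left at 0, IRQ coalescing is disabled
 * (see mv_set_irq_coalescing() below).
 */
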
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
	COAL_CLOCKS_PER_USEC	= 150,		/* for calculating COAL_TIMEs */
	MAX_COAL_TIME_THRESHOLD	= ((1 << 24) - 1), /* internal clocks count */
	MAX_COAL_IO_COUNT	= 255,		/* completed I/O count */

	MV_PCI_REG_BASE		= 0,

	/*
	 * Per-chip ("all ports") interrupt coalescing feature.
	 * This is only for GEN_II / GEN_IIE hardware.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	COAL_REG_BASE		= 0x18000,
	IRQ_COAL_CAUSE		= (COAL_REG_BASE + 0x08),
	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */

	IRQ_COAL_IO_THRESHOLD	= (COAL_REG_BASE + 0xcc),
	IRQ_COAL_TIME_THRESHOLD	= (COAL_REG_BASE + 0xd0),

	/*
	 * Registers for the (unused here) transaction coalescing feature:
	 */
	TRAN_COAL_CAUSE_LO	= (COAL_REG_BASE + 0x88),
	TRAN_COAL_CAUSE_HI	= (COAL_REG_BASE + 0x8c),

	SATAHC0_REG_BASE	= 0x20000,
	FLASH_CTL		= 0x1046c,
	GPIO_PORT_CTL		= 0x104f0,
	RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1), /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NCQ |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	MV_PCI_COMMAND		= 0xc00,
	MV_PCI_COMMAND_MWRCOM	= (1 << 4),	/* PCI Master Write Combining */
	MV_PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE		= 0x1d58,
	PCI_IRQ_MASK		= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE		= 0x1900,
	PCIE_IRQ_MASK		= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE	= 0x1d60,
	PCI_HC_MAIN_IRQ_MASK	= 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE	= 0x20020,
	SOC_HC_MAIN_IRQ_MASK	= 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
	DONE_IRQ		= (1 << 1),	/* shift by (2 * port #) */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	DONE_IRQ_0_3		= 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
	DONE_IRQ_4_7		= (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
	PCI_ERR			= (1 << 18),
	TRAN_COAL_LO_DONE	= (1 << 19),	/* transaction coalescing */
	TRAN_COAL_HI_DONE	= (1 << 20),	/* transaction coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),	/* HC0 IRQ coalescing */
	PORTS_4_7_COAL_DONE	= (1 << 17),	/* HC1 IRQ coalescing */
	ALL_PORTS_COAL_DONE	= (1 << 21),	/* GEN_II(E) IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6), /* bits 31-9, 7-6 */

	/* SATAHC registers */
	HC_CFG			= 0x00,

	HC_IRQ_CAUSE		= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/*
	 * Per-HC (Host-Controller) interrupt coalescing feature.
	 * This is present on all chip generations.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	HC_IRQ_COAL_IO_THRESHOLD	= 0x000c,
	HC_IRQ_COAL_TIME_THRESHOLD	= 0x0010,

	SOC_LED_CTRL		= 0x2c,
	SOC_LED_CTRL_BLINK	= (1 << 0),	/* Active LED blink */
	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */
						/*  with dev activity LED */

	/* Shadow block registers */
	SHD_BLK			= 0x100,
	SHD_CTL_AST		= 0x20,		/* ofs from SHD_BLK */

	/* SATA registers */
	SATA_STATUS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE		= 0x350,
	FIS_IRQ_CAUSE		= 0x364,
	FIS_IRQ_CAUSE_AN	= (1 << 9),	/* async notification */

	LTMODE			= 0x30c,	/* requires read-after-write */
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE2		= 0x330,
	PHY_MODE3		= 0x310,

	PHY_MODE4		= 0x314,	/* requires read-after-write */
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

	SATA_IFCTL		= 0x344,
	SATA_TESTCTL		= 0x348,
	SATA_IFSTAT		= 0x34c,
	VENDOR_UNIQUE_FIS	= 0x35c,

	FISCFG			= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	PHY_MODE9_GEN2		= 0x398,
	PHY_MODE9_GEN1		= 0x39c,
	PHYCFG_OFS		= 0x3a0,	/* only in 65n devices */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_IFCFG		= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE	= 0x8,
	EDMA_ERR_IRQ_MASK	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI	= 0x10,
	EDMA_REQ_Q_IN_PTR	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI	= 0x1c,
	EDMA_RSP_Q_IN_PTR	= 0x20,
	EDMA_RSP_Q_OUT_PTR	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	EDMA_HALTCOND		= 0x60,		/* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD	= 0x6C,		/* GenIIe unknown/reserved */

	BMDMA_CMD		= 0x224,	/* bmdma command register */
	BMDMA_STATUS		= 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW		= 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH		= 0x230,	/* bmdma PRD addr 63:32 */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is led blinking enabled? */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32			fiscfg;
	u32			ltmode;
	u32			haltcond;
	u32			unknown_rsvd;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	struct mv_cached_regs	cached;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	unsigned int		board_idx;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_offset;
	u32			irq_mask_offset;
	u32			unmask_all_irqs;

#if defined(CONFIG_HAVE_CLK)
	struct clk		*clk;
#endif
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
				    struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
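/*
 * Worked example (illustrative): MV_MAX_SG_CT is 256, so .sg_tablesize
 * is 128.  In the worst case each of those 128 s/g entries crosses a
 * 64K boundary and is split in two by mv_fill_sg(), yielding exactly
 * the 256 ePRDs that fit in one MV_SG_TBL_SZ (16 * 256 byte) table.
 */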
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.dev_config		= mv6_dev_config,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.softreset		= mv_softreset,
	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.sff_check_status	= mv_sff_check_status,
	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_GEN_I_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_GEN_II_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

static const struct mv_hw_ops mv_soc_65n_ops = {
	.phy_errata		= mv_soc_65n_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
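
/*
 * Worked example (illustrative): for port 6 (HC1, hardport 2),
 * shift = 1 * HC_SHIFT + 2 * 2 = 13, so in the main_irq_cause and
 * main_irq_mask registers, ERR_IRQ for port 6 is bit 13 and
 * DONE_IRQ is bit 14.
 */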

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/**
 * mv_save_cached_regs - (re-)initialize cached port registers
 * @ap: the port whose registers we are caching
 *
 * Initialize the local cache of port registers,
 * so that reading them over and over again can
 * be avoided on the hotter paths of this driver.
 * This saves a few microseconds each time we switch
 * to/from EDMA mode to perform (e.g.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	pp->cached.fiscfg = readl(port_mmio + FISCFG);
	pp->cached.ltmode = readl(port_mmio + LTMODE);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
}

/**
 * mv_write_cached_reg - write to a cached port register
 * @addr: hardware address of the register
 * @old: pointer to cached value of the register
 * @new: new value for the register
 *
 * Write a new value to a cached register,
 * but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		unsigned long laddr;
		*old = new;
		/*
		 * Workaround for 88SX60x1-B2 FEr SATA#13:
		 * Read-after-write is needed to prevent generating 64-bit
		 * write cycles on the PCI bus for SATA interface registers
		 * at offsets ending in 0x4 or 0xc.
		 *
		 * Looks like a lot of fuss, but it avoids an unnecessary
		 * +1 usec read-after-write delay for unaffected registers.
		 */
		laddr = (long)addr & 0xffff;
		if (laddr >= 0x300 && laddr <= 0x33c) {
			laddr &= 0x000f;
			if (laddr == 0x4 || laddr == 0xc) {
				writelfl(new, addr); /* read after write */
				return;
			}
		}
		writel(new, addr); /* unaffected by the errata */
	}
}
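
/*
 * Illustrative example of the test above: an address whose low 16 bits
 * are 0x304 is in the 0x300..0x33c window and ends in 0x4, so it takes
 * the writelfl() (read-after-write) path; one ending in 0x308 falls
 * through to the plain writel().
 */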

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR);
}

static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
	/*
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 *
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register as Marvell's naming might suggest.
	 */
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
}

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		mv_write_main_irq_mask(new_mask, hpriv);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + FIS_IRQ_CAUSE);

	mv_enable_port_irqs(ap, port_irqs);
}

static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	u32 coal_enable = 0;
	unsigned long flags;
	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
							ALL_PORTS_COAL_DONE;

	/* Disable IRQ coalescing if either threshold is zero */
	if (!usecs || !count) {
		clks = count = 0;
	} else {
		/* Respect maximum limits of the hardware */
		clks = usecs * COAL_CLOCKS_PER_USEC;
		if (clks > MAX_COAL_TIME_THRESHOLD)
			clks = MAX_COAL_TIME_THRESHOLD;
		if (count > MAX_COAL_IO_COUNT)
			count = MAX_COAL_IO_COUNT;
	}

	spin_lock_irqsave(&host->lock, flags);
	mv_set_main_irq_mask(host, coal_disable, 0);

	if (is_dual_hc && !IS_GEN_I(hpriv)) {
		/*
		 * GEN_II/GEN_IIE with dual host controllers:
		 * one set of global thresholds for the entire chip.
		 */
		writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
		/* clear leftover coal IRQ bit */
		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
		if (count)
			coal_enable = ALL_PORTS_COAL_DONE;
		clks = count = 0; /* force clearing of regular regs below */
	}

	/*
	 * All chips: independent thresholds for each HC on the chip.
	 */
	hc_mmio = mv_hc_base_from_port(mmio, 0);
	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
	if (count)
		coal_enable |= PORTS_0_3_COAL_DONE;
	if (is_dual_hc) {
		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
		if (count)
			coal_enable |= PORTS_4_7_COAL_DONE;
	}

	mv_set_main_irq_mask(host, 0, coal_enable);
	spin_unlock_irqrestore(&host->lock, flags);
}
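
/*
 * Worked example (illustrative): mv_set_irq_coalescing(host, 4, 100)
 * computes clks = 100 * COAL_CLOCKS_PER_USEC = 15000, well under
 * MAX_COAL_TIME_THRESHOLD, so the hardware then defers the completion
 * IRQ until either 4 I/Os have completed or ~100 usecs have elapsed.
 */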

/**
 * mv_start_edma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command being started
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			  struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
}

/**
 * mv_stop_edma_engine - Disable eDMA engine
 * @port_mmio: io base address
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	int err = 0;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_err(ap, "Unable to stop eDMA\n");
		err = -EIO;
	}
	mv_edma_cfg(ap, 0, 0);
	return err;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
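
/*
 * Resulting mapping (illustrative): SCR_STATUS (0) -> 0x300,
 * SCR_ERROR (1) -> 0x304, SCR_CONTROL (2) -> 0x308, while
 * SCR_ACTIVE maps to 0x350 (SATA_ACTIVE).
 */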

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		void __iomem *addr = mv_ap_base(link->ap) + ofs;
		if (sc_reg_in == SCR_CONTROL) {
			/*
			 * Workaround for 88SX60x1 FEr SATA#26:
			 *
			 * COMRESETs have to take care not to accidentally
			 * put the drive to sleep when writing SCR_CONTROL.
			 * Setting bits 12..15 prevents this problem.
			 *
			 * So if we see an outbound COMRESET, set those bits.
			 * Ditto for the followup write that clears the reset.
			 *
			 * The proprietary driver does this for
			 * all chip versions, and so do we.
			 */
			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
				val |= 0xf000;
		}
		writelfl(val, addr);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_info(adev,
				"NCQ disabled for command-based switching\n");
		}
	}
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;

	/* PIO commands need exclusive link: no other commands [DMA or PIO]
	 * can run concurrently.
	 * set excl_link when we want to send a PIO command in DMA mode
	 * or a non-NCQ command in NCQ mode.
	 * When we receive a command from that link, and there are no
	 * outstanding commands, mark a flag to clear excl_link and let
	 * the command go through.
	 */
	if (unlikely(ap->excl_link)) {
		if (link == ap->excl_link) {
			if (ap->nr_active_links)
				return ATA_DEFER_PORT;
			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
			return 0;
		} else
			return ATA_DEFER_PORT;
	}

	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
		if (ata_is_ncq(qc->tf.protocol))
			return 0;
		else {
			ap->excl_link = link;
			return ATA_DEFER_PORT;
		}
	}

	return ATA_DEFER_PORT;
}

static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *port_mmio;

	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
	u32 haltcond, *old_haltcond = &pp->cached.haltcond;

	ltmode   = *old_ltmode & ~LTMODE_BIT8;
	haltcond = *old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
		ltmode = *old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			haltcond &= ~EDMA_ERR_DEV;
		else
			fiscfg |=  FISCFG_WAIT_DEV_ERR;
	} else {
		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	}

	port_mmio = mv_ap_base(ap);
	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
}

static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + GPIO_PORT_CTL);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + GPIO_PORT_CTL);
}

/**
 * mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
 * @ap: Port being initialized
 * @enable_bmdma: enable (1) or disable (0) basic DMA
 *
 * There are two DMA modes on these chips:  basic DMA, and EDMA.
 *
 * Bit-0 of the "EDMA RESERVED" register enables/disables use
 * of basic DMA on the GEN_IIE versions of the chips.
 *
 * This bit survives EDMA resets, and must be set for basic DMA
 * to function, and should be cleared when EDMA is active.
 */
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{
	struct mv_port_priv *pp = ap->private_data;
	u32 new, *old = &pp->cached.unknown_rsvd;

	if (enable_bmdma)
		new = *old | 1;
	else
		new = *old & ~1;
	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
}

/*
 * SOC chips have an issue whereby the HDD LEDs don't always blink
 * during I/O when NCQ is enabled.  Enabling a special "LED blink" mode
 * of the SOC takes care of it, generating a steady blink rate when
 * any drive on the chip is active.
 *
 * Unfortunately, the blink mode is a global hardware setting for the SOC,
 * so we must use it whenever at least one port on the SOC has NCQ enabled.
 *
 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
 * LED operation works then, and provides better (more accurate) feedback.
 *
 * Note that this code assumes that an SOC never has more than one HC onboard.
 */
static void mv_soc_led_blink_enable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;

	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
		return;
	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}

static void mv_soc_led_blink_disable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;
	unsigned int port;

	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
		return;

	/* disable led-blink only if no ports are using NCQ */
	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *this_ap = host->ports[port];
		struct mv_port_priv *pp = this_ap->private_data;

		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
			return;
	}

	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}

00b81235 1570static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
e4e7b892 1571{
0c58912e 1572 u32 cfg;
e12bef50
ML
1573 struct mv_port_priv *pp = ap->private_data;
1574 struct mv_host_priv *hpriv = ap->host->private_data;
1575 void __iomem *port_mmio = mv_ap_base(ap);
e4e7b892
JG
1576
1577 /* set up non-NCQ EDMA configuration */
0c58912e 1578 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
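	/* (presumably depth-1 encoding: 0x1f matches the 32-entry queues) */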
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * but first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(ap, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS;	/* FIS-based switching */
		}

		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		if (want_edma) {
			cfg |= (1 << 22);	/* enab 4-entry host queue cache */
			if (!IS_SOC(hpriv))
				cfg |= (1 << 18);	/* enab early completion */
		}
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17);	/* enab cut-thru (dis stor&forwrd) */
		mv_bmdma_enable_iie(ap, !want_edma);

		if (IS_SOC(hpriv)) {
			if (want_ncq)
				mv_soc_led_blink_enable(ap);
			else
				mv_soc_led_blink_disable(ap);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	}

	writelfl(cfg, port_mmio + EDMA_CFG);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;
	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(ap->lock, flags);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);
	spin_unlock_irqrestore(ap->lock, flags);

	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA, cleanup port memory.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
	spin_unlock_irqrestore(ap->lock, flags);
	mv_port_free_dma_mem(ap);
}

/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

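			/*
			 * An ePRD entry must not cross a 64KB boundary:
			 * e.g. addr=0x1f000, sg_len=0x3000 is emitted as
			 * 0x1000 bytes @ 0x1f000, then 0x2000 @ 0x20000.
			 */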
			if (offset + len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
			mv_sg->reserved = 0;

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
	mb(); /* ensure data structure is visible to the chipset */
}

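/*
 * Pack one 16-bit CRQB command word: bits 7:0 carry the register data,
 * the field at CRQB_CMD_ADDR_SHIFT selects the target ATA register, and
 * CRQB_CMD_LAST marks the final word of the sequence (CRQB_CMD_CS is
 * always set).
 */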
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

/**
 *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
 *	@ap: Port associated with this ATA transaction.
 *
 *	We need this only for ATAPI bmdma transactions,
 *	as otherwise we experience spurious interrupts
 *	after libata-sff handles the bmdma interrupts.
 */
static void mv_sff_irq_clear(struct ata_port *ap)
{
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
}

/**
 *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
 *	@qc: queued command to check for chipset/DMA compatibility.
 *
 *	The bmdma engines cannot handle speculative data sizes
 *	(bytecount under/over flow).  So only allow DMA for
 *	data transfer commands with known data sizes.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	if (scmd) {
		switch (scmd->cmnd[0]) {
		case READ_6:
		case READ_10:
		case READ_12:
		case WRITE_6:
		case WRITE_10:
		case WRITE_12:
		case GPCMD_READ_CD:
		case GPCMD_SEND_DVD_STRUCTURE:
		case GPCMD_SEND_CUE_SHEET:
			return 0; /* DMA is safe */
		}
	}
	return -EOPNOTSUPP; /* use PIO instead */
}

/**
 *	mv_bmdma_setup - Set up BMDMA transaction
 *	@qc: queued command to prepare DMA for.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	mv_fill_sg(qc);

	/* clear all DMA cmd bits */
	writel(0, port_mmio + BMDMA_CMD);

	/* load PRD table addr. */
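	/* ((x >> 16) >> 16 rather than x >> 32: safe even when dma_addr_t is 32 bits) */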
	writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
		port_mmio + BMDMA_PRD_HIGH);
	writelfl(pp->sg_tbl_dma[qc->tag],
		port_mmio + BMDMA_PRD_LOW);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}

/**
 *	mv_bmdma_start - Start a BMDMA transaction
 *	@qc: queued command to start DMA on.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;

	/* start host DMA transaction */
	writelfl(cmd, port_mmio + BMDMA_CMD);
}

/**
 *	mv_bmdma_stop_ap - Stop BMDMA transfer
 *	@ap: port to stop DMA on.
 *
 *	Clears the ATA_DMA_START flag in the bmdma control register
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_stop_ap(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 cmd;

	/* clear start/stop bit */
	cmd = readl(port_mmio + BMDMA_CMD);
	if (cmd & ATA_DMA_START) {
		cmd &= ~ATA_DMA_START;
		writelfl(cmd, port_mmio + BMDMA_CMD);

		/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
		ata_sff_dma_pause(ap);
	}
}

static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{
	mv_bmdma_stop_ap(qc->ap);
}

/**
 *	mv_bmdma_status - Read BMDMA status
 *	@ap: port for which to retrieve DMA status.
 *
 *	Read and return equivalent of the sff BMDMA status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 mv_bmdma_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 reg, status;

	/*
	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
	 * and the ATA_DMA_INTR bit doesn't exist.
	 */
	reg = readl(port_mmio + BMDMA_STATUS);
	if (reg & ATA_DMA_ACTIVE)
		status = ATA_DMA_ACTIVE;
	else if (reg & ATA_DMA_ERR)
		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
	else {
		/*
		 * Just because DMA_ACTIVE is 0 (DMA completed),
		 * this does _not_ mean the device is "done".
		 * So we should not yet be signalling ATA_DMA_INTR
		 * in some cases.  Eg. DSM/TRIM, and perhaps others.
		 */
		mv_bmdma_stop_ap(ap);
		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
			status = 0;
		else
			status = ATA_DMA_INTR;
	}
	return status;
}
1959
299b3f8d
ML
1960static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
1961{
1962 struct ata_taskfile *tf = &qc->tf;
1963 /*
1964 * Workaround for 88SX60x1 FEr SATA#24.
1965 *
1966 * Chip may corrupt WRITEs if multi_count >= 4kB.
1967 * Note that READs are unaffected.
1968 *
1969 * It's not clear if this errata really means "4K bytes",
1970 * or if it always happens for multi_count > 7
1971 * regardless of device sector_size.
1972 *
1973 * So, for safety, any write with multi_count > 7
1974 * gets converted here into a regular PIO write instead:
1975 */
1976 if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
1977 if (qc->dev->multi_count > 7) {
1978 switch (tf->command) {
1979 case ATA_CMD_WRITE_MULTI:
1980 tf->command = ATA_CMD_PIO_WRITE;
1981 break;
1982 case ATA_CMD_WRITE_MULTI_FUA_EXT:
1983 tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
1984 /* fall through */
1985 case ATA_CMD_WRITE_MULTI_EXT:
1986 tf->command = ATA_CMD_PIO_WRITE_EXT;
1987 break;
1988 }
1989 }
1990 }
1991}

/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf = &qc->tf;
	u16 flags = 0;
	unsigned in_index;

	switch (tf->protocol) {
	case ATA_PROT_DMA:
		if (tf->command == ATA_CMD_DSM)
			return;
		/* fall-thru */
	case ATA_PROT_NCQ:
		break;	/* continue below */
	case ATA_PROT_PIO:
		mv_rw_multi_errata_sata24(qc);
		return;
	default:
		return;
	}

	/* Fill in command request block
	 */
	if (!(tf->flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf = &qc->tf;
	unsigned in_index;
	u32 flags = 0;

	if ((tf->protocol != ATA_PROT_DMA) &&
	    (tf->protocol != ATA_PROT_NCQ))
		return;
	if (tf->command == ATA_CMD_DSM)
		return;	/* use bmdma for this */

	/* Fill in Gen IIE command request block */
	if (!(tf->flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
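	/* the Gen-IIE CRQB carries the tag in both device-queue and host-queue fields */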
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *	mv_sff_check_status - fetch device status, if valid
 *	@ap: ATA port to fetch status from
 *
 *	When using command issue via mv_qc_issue_fis(),
 *	the initial ATA_BUSY state does not show up in the
 *	ATA status (shadow) register.  This can confuse libata!
 *
 *	So we have a hook here to fake ATA_BUSY for that situation,
 *	until the first time a BUSY, DRQ, or ERR bit is seen.
 *
 *	The rest of the time, it simply returns the ATA status register.
 */
static u8 mv_sff_check_status(struct ata_port *ap)
{
	u8 stat = ioread8(ap->ioaddr.status_addr);
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
		else
			stat = ATA_BUSY;
	}
	return stat;
}

/**
 *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
 *	@ap: ATA port to send the FIS on
 *	@fis: fis to be sent
 *	@nwords: number of 32-bit words in the fis
 */
static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 ifctl, old_ifctl, ifstat;
	int i, timeout = 200, final_word = nwords - 1;

	/* Initiate FIS transmission mode */
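	/* (bit 8 of SATA_IFCTL apparently selects this mode; bit 9 below flags the final word) */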
	old_ifctl = readl(port_mmio + SATA_IFCTL);
	ifctl = 0x100 | (old_ifctl & 0xf);
	writelfl(ifctl, port_mmio + SATA_IFCTL);

	/* Send all words of the FIS except for the final word */
	for (i = 0; i < final_word; ++i)
		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);

	/* Flag end-of-transmission, and then send the final word */
	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);

	/*
	 * Wait for FIS transmission to complete.
	 * This typically takes just a single iteration.
	 */
	do {
		ifstat = readl(port_mmio + SATA_IFSTAT);
	} while (!(ifstat & 0x1000) && --timeout);

	/* Restore original port configuration */
	writelfl(old_ifctl, port_mmio + SATA_IFCTL);

	/* See if it worked */
	if ((ifstat & 0x3000) != 0x1000) {
		ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
			      __func__, ifstat);
		return AC_ERR_OTHER;
	}
	return 0;
}

/**
 *	mv_qc_issue_fis - Issue a command directly as a FIS
 *	@qc: queued command to start
 *
 *	Note that the ATA shadow registers are not updated
 *	after command issue, so the device will appear "READY"
 *	if polled, even while it is BUSY processing the command.
 *
 *	So we use a status hook to fake ATA_BUSY until the drive changes state.
 *
 *	Note: we don't get updated shadow regs on *completion*
 *	of non-data commands.  So avoid sending them via this function,
 *	as they will appear to have completed immediately.
 *
 *	GEN_IIE has special registers that we could get the result tf from,
 *	but earlier chipsets do not.  For now, we ignore those registers.
 */
static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_link *link = qc->dev->link;
	u32 fis[5];
	int err = 0;

	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
	err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
	if (err)
		return err;

	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		/* fall through */
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_FIRST;
		break;
	case ATA_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			ap->hsm_task_state = HSM_ST_FIRST;
		else
			ap->hsm_task_state = HSM_ST;
		break;
	default:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		ata_sff_queue_pio_task(link, 0);
	return 0;
}

/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	static int limit_warnings = 10;
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;
	unsigned int port_irqs;

	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		if (qc->tf.command == ATA_CMD_DSM) {
			if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
				return AC_ERR_OTHER;
			break;  /* use bmdma for this */
		}
		/* fall thru */
	case ATA_PROT_NCQ:
		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
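		/* advance the software producer index around the 32-entry ring */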
		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

		/* Write the request in pointer to kick the EDMA to life */
		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
			port_mmio + EDMA_REQ_Q_IN_PTR);
		return 0;

	case ATA_PROT_PIO:
		/*
		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
		 *
		 * Someday, we might implement special polling workarounds
		 * for these, but it all seems rather unnecessary since we
		 * normally use only DMA for commands which transfer more
		 * than a single block of data.
		 *
		 * Much of the time, this could just work regardless.
		 * So for now, just log the incident, and allow the attempt.
		 */
		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
			--limit_warnings;
			ata_link_warn(qc->dev->link, DRV_NAME
				      ": attempting PIO w/multiple DRQ: "
				      "this may fail due to h/w errata\n");
		}
		/* drop through */
	case ATA_PROT_NODATA:
	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (ap->flags & ATA_FLAG_PIO_POLLING)
			qc->tf.flags |= ATA_TFLAG_POLLING;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		port_irqs = ERR_IRQ;	/* mask device interrupt when polling */
	else
		port_irqs = ERR_IRQ | DONE_IRQ;	/* unmask all interrupts */

	/*
	 * We're about to send a non-EDMA capable command to the
	 * port.  Turn off EDMA so there won't be problems accessing
	 * shadow block, etc registers.
	 */
	mv_stop_edma(ap);
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
	mv_pmp_select(ap, qc->dev->link->pmp);

	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		/*
		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
		 *
		 * After any NCQ error, the READ_LOG_EXT command
		 * from libata-eh *must* use mv_qc_issue_fis().
		 * Otherwise it might fail, due to chip errata.
		 *
		 * Rather than special-case it, we'll just *always*
		 * use this method here for READ_LOG_EXT, making for
		 * easier testing.
		 */
		if (IS_GEN_II(hpriv))
			return mv_qc_issue_fis(qc);
	}
	return ata_bmdma_qc_issue(qc);
}

static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
		return qc;
	return NULL;
}

static void mv_pmp_error_handler(struct ata_port *ap)
{
	unsigned int pmp, pmp_map;
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
		/*
		 * Perform NCQ error analysis on failed PMPs
		 * before we freeze the port entirely.
		 *
		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
		 */
		pmp_map = pp->delayed_eh_pmp_map;
		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
		for (pmp = 0; pmp_map != 0; pmp++) {
			unsigned int this_pmp = (1 << pmp);
			if (pmp_map & this_pmp) {
				struct ata_link *link = &ap->pmp_link[pmp];
				pmp_map &= ~this_pmp;
				ata_eh_analyze_ncq_error(link);
			}
		}
		ata_port_freeze(ap);
	}
	sata_pmp_error_handler(ap);
}

static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);

	return readl(port_mmio + SATA_TESTCTL) >> 16;
}

static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
	struct ata_eh_info *ehi;
	unsigned int pmp;

	/*
	 * Initialize EH info for PMPs which saw device errors
	 */
	ehi = &ap->link.eh_info;
	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = (1 << pmp);
		if (pmp_map & this_pmp) {
			struct ata_link *link = &ap->pmp_link[pmp];

			pmp_map &= ~this_pmp;
			ehi = &link->eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_push_desc(ehi, "dev err");
			ehi->err_mask |= AC_ERR_DEV;
			ehi->action |= ATA_EH_RESET;
			ata_link_abort(link);
		}
	}
}

static int mv_req_q_empty(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 in_ptr, out_ptr;

	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
}

static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	int failed_links;
	unsigned int old_map, new_map;

	/*
	 * Device error during FBS+NCQ operation:
	 *
	 * Set a port flag to prevent further I/O being enqueued.
	 * Leave the EDMA running to drain outstanding commands from this port.
	 * Perform the post-mortem/EH only when all responses are complete.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
	 */
	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
		pp->delayed_eh_pmp_map = 0;
	}
	old_map = pp->delayed_eh_pmp_map;
	new_map = old_map | mv_get_err_pmp_map(ap);

	if (old_map != new_map) {
		pp->delayed_eh_pmp_map = new_map;
		mv_pmp_eh_prep(ap, new_map & ~old_map);
	}
	failed_links = hweight16(new_map);

	ata_port_info(ap,
		      "%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n",
		      __func__, pp->delayed_eh_pmp_map,
		      ap->qc_active, failed_links,
		      ap->nr_active_links);

	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
		mv_process_crpb_entries(ap, pp);
		mv_stop_edma(ap);
		mv_eh_freeze(ap);
		ata_port_info(ap, "%s: done\n", __func__);
		return 1;	/* handled */
	}
	ata_port_info(ap, "%s: waiting\n", __func__);
	return 1;	/* handled */
}

static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
	/*
	 * Possible future enhancement:
	 *
	 * FBS+non-NCQ operation is not yet implemented.
	 * See related notes in mv_edma_cfg().
	 *
	 * Device error during FBS+non-NCQ operation:
	 *
	 * We need to snapshot the shadow registers for each failed command.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
	 */
	return 0;	/* not handled */
}

static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;	/* EDMA was not active: not handled */
	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
		return 0;	/* FBS was not active: not handled */

	if (!(edma_err_cause & EDMA_ERR_DEV))
		return 0;	/* non DEV error: not handled */
	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
		return 0;	/* other problems: not handled */

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
		/*
		 * EDMA should NOT have self-disabled for this case.
		 * If it did, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
				      __func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_ncq_dev_err(ap);
	} else {
		/*
		 * EDMA should have self-disabled for this case.
		 * If it did not, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
				      __func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_non_ncq_dev_err(ap);
	}
	return 0;	/* not handled */
}

static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	char *when = "idle";

	ata_ehi_clear_desc(ehi);
	if (edma_was_enabled) {
		when = "EDMA enabled";
	} else {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
			when = "polling";
	}
	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
	ehi->err_mask |= AC_ERR_OTHER;
	ehi->action |= ATA_EH_RESET;
	ata_port_freeze(ap);
}

/**
 *	mv_err_intr - Handle error interrupts on the port
 *	@ap: ATA channel to manipulate
 *
 *	Most cases require a full reset of the chip's state machine,
 *	which also performs a COMRESET.
 *	Also, if the port disabled DMA, update our cached copy to match.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	u32 fis_cause = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc;
	int abort = 0;

	/*
	 * Read and clear the SError and err_cause bits.
	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
	 */
	sata_scr_read(&ap->link, SCR_ERROR, &serr);
	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
		writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
	}
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);

	if (edma_err_cause & EDMA_ERR_DEV) {
		/*
		 * Device errors during FIS-based switching operation
		 * require special handling.
		 */
		if (mv_handle_dev_err(ap, edma_err_cause))
			return;
	}

	qc = mv_get_active_qc(ap);
	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
			  edma_err_cause, pp->pp_flags);

	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
		if (fis_cause & FIS_IRQ_CAUSE_AN) {
			u32 ec = edma_err_cause &
			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
			sata_async_notification(ap);
			if (!ec)
				return; /* Just an AN; no need for the nukes */
			ata_ehi_push_desc(ehi, "SDB notify");
		}
	}
	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV) {
		err_mask |= AC_ERR_DEV;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "dev error");
	}
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			ata_ehi_push_desc(ehi, "SError=%08x", serr);
			err_mask |= AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (err_mask == AC_ERR_DEV) {
		/*
		 * Cannot do ata_port_freeze() here,
		 * because it would kill PIO access,
		 * which is needed for further diagnosis.
		 */
		mv_eh_freeze(ap);
		abort = 1;
	} else if (edma_err_cause & eh_freeze_mask) {
		/*
		 * Note to self: ata_port_freeze() calls ata_port_abort()
		 */
		ata_port_freeze(ap);
	} else {
		abort = 1;
	}

	if (abort) {
		if (qc)
			ata_link_abort(qc->dev->link);
		else
			ata_port_abort(ap);
	}
}

static bool mv_process_crpb_response(struct ata_port *ap,
		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
	u8 ata_status;
	u16 edma_status = le16_to_cpu(response->flags);

	/*
	 * edma_status from a response queue entry:
	 *   LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
	 *   MSB is saved ATA status from command completion.
	 */
	if (!ncq_enabled) {
		u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
		if (err_cause) {
			/*
			 * Error will be seen/handled by
			 * mv_err_intr().  So do nothing at all here.
			 */
			return false;
		}
	}
	ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
	if (!ac_err_mask(ata_status))
		return true;
	/* else: leave it for mv_err_intr() */
	return false;
}

static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	u32 done_mask = 0;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process new responses since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
			done_mask |= 1 << tag;
		work_done = true;
	}

	if (work_done) {
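		/* (ap->qc_active ^ done_mask == the tags still outstanding) */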
		ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);

		/* Update the software queue position index in hardware */
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR);
	}
}

static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
	struct mv_port_priv *pp;
	int edma_was_enabled;

	/*
	 * Grab a snapshot of the EDMA_EN flag setting,
	 * so that we have a consistent view for this port,
	 * even if one of the routines we call changes it.
	 */
	pp = ap->private_data;
	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	/*
	 * Process completed CRPB response(s) before other events.
	 */
	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
		mv_process_crpb_entries(ap, pp);
		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
			mv_handle_fbs_ncq_dev_err(ap);
	}
	/*
	 * Handle chip-reported errors, or continue on to handle PIO.
	 */
	if (unlikely(port_cause & ERR_IRQ)) {
		mv_err_intr(ap);
	} else if (!edma_was_enabled) {
		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
		if (qc)
			ata_bmdma_port_intr(ap, qc);
		else
			mv_unexpected_intr(ap, edma_was_enabled);
	}
}

/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host: host specific structure
 *	@main_irq_cause: Main interrupt cause register for the chip.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	unsigned int handled = 0, port;

	/* If asserted, clear the "all ports" IRQ coalescing bit */
	if (main_irq_cause & ALL_PORTS_COAL_DONE)
		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);

	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		unsigned int p, shift, hardport, port_cause;

		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
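		/* shift = bit position of this port's IRQ bits in main_irq_cause;
		 * hardport = the port's index (0..3) within its host controller */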
		/*
		 * Each hc within the host has its own hc_irq_cause register,
		 * where the interrupting ports bits get ack'd.
		 */
		if (hardport == 0) {	/* first port on this hc ? */
			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
			u32 port_mask, ack_irqs;
			/*
			 * Skip this entire hc if nothing pending for any ports
			 */
			if (!hc_cause) {
				port += MV_PORTS_PER_HC - 1;
				continue;
			}
			/*
			 * We don't need/want to read the hc_irq_cause register,
			 * because doing so hurts performance, and
			 * main_irq_cause already gives us everything we need.
			 *
			 * But we do have to *write* to the hc_irq_cause to ack
			 * the ports that we are handling this time through.
			 *
			 * This requires that we create a bitmap for those
			 * ports which interrupted us, and use that bitmap
			 * to ack (only) those ports via hc_irq_cause.
			 */
			ack_irqs = 0;
			if (hc_cause & PORTS_0_3_COAL_DONE)
				ack_irqs = HC_COAL_IRQ;
			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
				if ((port + p) >= hpriv->n_ports)
					break;
				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
				if (hc_cause & port_mask)
					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
			}
			hc_mmio = mv_hc_base_from_port(mmio, port);
			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
			handled = 1;
		}
		/*
		 * Handle interrupts signalled for this port:
		 */
		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
		if (port_cause)
			mv_port_intr(ap, port_cause);
	}
	return handled;
}

static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_offset);

	dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_offset);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
	return 1;	/* handled */
}

/**
 *	mv_interrupt - Main interrupt event handler
 *	@irq: unused
 *	@dev_instance: private data; in this case the host structure
 *
 *	Read the read only register to determine if any host
 *	controllers have pending interrupts.  If so, call lower level
 *	routine to handle.  Also check for PCI errors which are only
 *	reported here.
 *
 *	LOCKING:
 *	This routine holds the host lock while processing pending
 *	interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
	u32 main_irq_cause, pending_irqs;

	spin_lock(&host->lock);

	/* for MSI:  block new interrupts while in here */
	if (using_msi)
		mv_write_main_irq_mask(0, hpriv);

	main_irq_cause = readl(hpriv->main_irq_cause_addr);
	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (pending_irqs && main_irq_cause != 0xffffffffU) {
		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
			handled = mv_pci_error(host, hpriv->base);
		else
			handled = mv_host_intr(host, pending_irqs);
	}

	/* for MSI: unmask; interrupt cause bits will retrigger now */
	if (using_msi)
		mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
3008
c9d39130
JG
3009static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
3010{
3011 unsigned int ofs;
3012
3013 switch (sc_reg_in) {
3014 case SCR_STATUS:
3015 case SCR_ERROR:
3016 case SCR_CONTROL:
3017 ofs = sc_reg_in * sizeof(u32);
3018 break;
3019 default:
3020 ofs = 0xffffffffU;
3021 break;
3022 }
3023 return ofs;
3024}
3025
82ef04fb 3026static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
c9d39130 3027{
82ef04fb 3028 struct mv_host_priv *hpriv = link->ap->host->private_data;
f351b2d6 3029 void __iomem *mmio = hpriv->base;
82ef04fb 3030 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
c9d39130
JG
3031 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3032
da3dbb17
TH
3033 if (ofs != 0xffffffffU) {
3034 *val = readl(addr + ofs);
3035 return 0;
3036 } else
3037 return -EINVAL;
c9d39130
JG
3038}
3039
82ef04fb 3040static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
c9d39130 3041{
82ef04fb 3042 struct mv_host_priv *hpriv = link->ap->host->private_data;
f351b2d6 3043 void __iomem *mmio = hpriv->base;
82ef04fb 3044 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
c9d39130
JG
3045 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3046
da3dbb17 3047 if (ofs != 0xffffffffU) {
0d5ff566 3048 writelfl(val, addr + ofs);
da3dbb17
TH
3049 return 0;
3050 } else
3051 return -EINVAL;
c9d39130
JG
3052}
3053
7bb3c529 3054static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
522479fb 3055{
7bb3c529 3056 struct pci_dev *pdev = to_pci_dev(host->dev);
522479fb
JG
3057 int early_5080;
3058
44c10138 3059 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
3060
3061 if (!early_5080) {
3062 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3063 tmp |= (1 << 0);
3064 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3065 }
3066
7bb3c529 3067 mv_reset_pci_bus(host, mmio);
522479fb
JG
3068}
3069
3070static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3071{
cae5a29d 3072 writel(0x0fcfffff, mmio + FLASH_CTL);
522479fb
JG
3073}
3074
47c2b677 3075static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
3076 void __iomem *mmio)
3077{
c9d39130
JG
3078 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3079 u32 tmp;
3080
3081 tmp = readl(phy_mmio + MV5_PHY_MODE);
3082
3083 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
3084 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
3085}
3086
47c2b677 3087static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 3088{
522479fb
JG
3089 u32 tmp;
3090
cae5a29d 3091 writel(0, mmio + GPIO_PORT_CTL);
522479fb
JG
3092
3093 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
3094
3095 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3096 tmp |= ~(1 << 0);
3097 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
3098}
3099
2a47ce06
JG
3100static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3101 unsigned int port)
bca1c4eb 3102{
c9d39130
JG
3103 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3104 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3105 u32 tmp;
3106 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3107
3108 if (fix_apm_sq) {
cae5a29d 3109 tmp = readl(phy_mmio + MV5_LTMODE);
c9d39130 3110 tmp |= (1 << 19);
cae5a29d 3111 writel(tmp, phy_mmio + MV5_LTMODE);
c9d39130 3112
cae5a29d 3113 tmp = readl(phy_mmio + MV5_PHY_CTL);
c9d39130
JG
3114 tmp &= ~0x3;
3115 tmp |= 0x1;
cae5a29d 3116 writel(tmp, phy_mmio + MV5_PHY_CTL);
c9d39130
JG
3117 }
3118
3119 tmp = readl(phy_mmio + MV5_PHY_MODE);
3120 tmp &= ~mask;
3121 tmp |= hpriv->signal[port].pre;
3122 tmp |= hpriv->signal[port].amps;
3123 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
3124}
3125
c9d39130
JG
3126
3127#undef ZERO
3128#define ZERO(reg) writel(0, port_mmio + (reg))
3129static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3130 unsigned int port)
3131{
3132 void __iomem *port_mmio = mv_port_base(mmio, port);
3133
e12bef50 3134 mv_reset_channel(hpriv, mmio, port);
c9d39130
JG
3135
3136 ZERO(0x028); /* command */
cae5a29d 3137 writel(0x11f, port_mmio + EDMA_CFG);
c9d39130
JG
3138 ZERO(0x004); /* timer */
3139 ZERO(0x008); /* irq err cause */
3140 ZERO(0x00c); /* irq err mask */
3141 ZERO(0x010); /* rq bah */
3142 ZERO(0x014); /* rq inp */
3143 ZERO(0x018); /* rq outp */
3144 ZERO(0x01c); /* respq bah */
3145 ZERO(0x024); /* respq outp */
3146 ZERO(0x020); /* respq inp */
3147 ZERO(0x02c); /* test control */
cae5a29d 3148 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
c9d39130
JG
3149}
3150#undef ZERO
3151
3152#define ZERO(reg) writel(0, hc_mmio + (reg))
3153static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3154 unsigned int hc)
47c2b677 3155{
c9d39130
JG
3156 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3157 u32 tmp;
3158
3159 ZERO(0x00c);
3160 ZERO(0x010);
3161 ZERO(0x014);
3162 ZERO(0x018);
3163
3164 tmp = readl(hc_mmio + 0x20);
3165 tmp &= 0x1c1c1c1c;
3166 tmp |= 0x03030303;
3167 writel(tmp, hc_mmio + 0x20);
3168}
3169#undef ZERO
3170
3171static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3172 unsigned int n_hc)
3173{
3174 unsigned int hc, port;
3175
3176 for (hc = 0; hc < n_hc; hc++) {
3177 for (port = 0; port < MV_PORTS_PER_HC; port++)
3178 mv5_reset_hc_port(hpriv, mmio,
3179 (hc * MV_PORTS_PER_HC) + port);
3180
3181 mv5_reset_one_hc(hpriv, mmio, hc);
3182 }
3183
3184 return 0;
47c2b677
JG
3185}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_offset);
	ZERO(hpriv->irq_mask_offset);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + GPIO_PORT_CTL);
}

/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @hpriv: host private data
 *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers (not used by this routine)
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
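
/*
 * Worst-case timing sketch, derived from the loop bounds above rather than
 * the spec: ~1000us waiting for PCI_MASTER_EMPTY plus a handful of 1us
 * polls to assert and then clear GLOB_SFT_RST, so a wedged host fails the
 * reset in roughly a millisecond instead of hanging the probe path.
 */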

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
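
/*
 * Bit-field example (illustration only): if PHY_MODE2 reads 0x3a0, then
 * amps = 0x3a0 & 0x700 = 0x300 (bits 10:8) and pre = 0x3a0 & 0xe0 = 0xa0
 * (bits 7:5). Both values are kept in-place so they can simply be OR'd
 * back into PHY_MODE2 by mv6_phy_errata().
 */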

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, m3;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/*
	 * Gen-II/IIe PHY_MODE3 errata RM#2:
	 * Achieves better receiver noise performance than the h/w default:
	 */
	m3 = readl(port_mmio + PHY_MODE3);
	m3 = (m3 & 0x1f) | (0x5555601 << 5);

	/* Guideline 88F5182 (GL# SATA-S11) */
	if (IS_SOC(hpriv))
		m3 &= ~0x1c;

	if (fix_phy_mode4) {
		u32 m4 = readl(port_mmio + PHY_MODE4);
		/*
		 * Enforce reserved-bit restrictions on GenIIe devices only.
		 * For earlier chipsets, force only the internal config field
		 * (workaround for errata FEr SATA#10 part 1).
		 */
		if (IS_GEN_IIE(hpriv))
			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
		else
			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
		writel(m4, port_mmio + PHY_MODE4);
	}
	/*
	 * Workaround for 60x1-B2 errata SATA#13:
	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
	 * Or ensure we use writelfl() when writing PHY_MODE4.
	 */
	writel(m3, port_mmio + PHY_MODE3);

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
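
/*
 * Arithmetic check for the PHY_MODE3 fixup above (illustration only):
 * (0x5555601 << 5) == 0xaaaac020, so the update preserves the low five
 * bits of the current register value and forces bits 31:5 to the
 * errata-recommended pattern.
 */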

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			      void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x101f, port_mmio + EDMA_CFG);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				       void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}

#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);
	u32 reg;

	reg = readl(port_mmio + PHY_MODE3);
	reg &= ~(0x3 << 27);	/* SELMUPF (bits 28:27) to 1 */
	reg |= (0x1 << 27);
	reg &= ~(0x3 << 29);	/* SELMUPI (bits 30:29) to 1 */
	reg |= (0x1 << 29);
	writel(reg, port_mmio + PHY_MODE3);

	reg = readl(port_mmio + PHY_MODE4);
	reg &= ~0x1;	/* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
	reg |= (0x1 << 16);
	writel(reg, port_mmio + PHY_MODE4);

	reg = readl(port_mmio + PHY_MODE9_GEN2);
	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
	reg |= 0x8;
	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
	writel(reg, port_mmio + PHY_MODE9_GEN2);

	reg = readl(port_mmio + PHY_MODE9_GEN1);
	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
	reg |= 0x8;
	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
	writel(reg, port_mmio + PHY_MODE9_GEN1);
}

/**
 *      soc_is_65n - check if the SoC is a 65 nm device
 *      @hpriv: host private data
 *
 *      Detect the SoC generation by reading the PHYCFG_OFS register:
 *      it exists only on the 65 nm devices, where it reads back as a
 *      non-zero value; on older devices the read returns 0.
 */
static bool soc_is_65n(struct mv_host_priv *hpriv)
{
	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);

	if (readl(port0_mmio + PHYCFG_OFS))
		return true;
	return false;
}

static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_IFCFG);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_IFCFG);
}
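
/*
 * Mask arithmetic for mv_setup_ifcfg() (illustration only): 0xf7f keeps
 * bits 0-6 and 8-11 of SATA_IFCFG while clearing bit 7 (the gen2i enable)
 * and everything above bit 11; 0x9b1000 then sets the fixed pattern the
 * chip spec calls for, and bit 7 is OR'd back in only when gen2i speed
 * is wanted.
 */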

static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/*
	 * The datasheet warns against setting EDMA_RESET when EDMA is active
	 * (but doesn't say what the problem might be). So we first try
	 * to disable the EDMA engine before doing the EDMA_RESET operation.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
		mv_setup_ifcfg(port_mmio, 1);
	}
	/*
	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
	 * link, and physical layers. It resets all SATA interface registers
	 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
	 */
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
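
/*
 * Ordering recap for mv_reset_channel(), as coded above: stop EDMA, assert
 * EDMA_RESET, re-program SATA_IFCFG on Gen-II/IIe (it survives the reset),
 * strobe EDMA_RESET again with ~25us for propagation, then apply the
 * per-chip PHY errata. Gen-I parts get an extra 1ms settling delay.
 */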

static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL);
		}
	}
}
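
/*
 * Example (illustration only): the low nibble of SATA_IFCTL addresses the
 * port-multiplier device. If the register currently ends in ...2 and link
 * 5 is wanted, the write becomes (reg & ~0xf) | 5, leaving every other
 * interface-control bit untouched.
 */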

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
			sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		rc = online ? -EAGAIN : rc;
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);

	return rc;
}
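
/*
 * SStatus decoding for the retry loop above (standard SATA SCR0 fields,
 * noted for reference): 0x0 = no device, 0x113 = device up at 1.5 Gbps,
 * 0x123 = device up at 3.0 Gbps. The problem value 0x121 means DET=1
 * (device detected but no phy communication), which is what triggers the
 * forced fall-back to 1.5 Gbps after 5 attempts.
 */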

static void mv_eh_freeze(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int port = ap->port_no;
	unsigned int hardport = mv_hardport_from_port(port);
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 hc_irq_cause;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	mv_enable_port_irqs(ap, ERR_IRQ);
}

/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *serr, *shd_base = port_mmio + SHD_BLK;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;

	/* Clear any currently outstanding port interrupt conditions */
	serr = port_mmio + mv_scr_offset(SCR_ERROR);
	writelfl(readl(serr), serr);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
		readl(port_mmio + EDMA_ERR_IRQ_MASK));
}

static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (IS_SOC(hpriv) || IS_PCIE(hpriv))
		return 0;	/* not PCI-X capable */
	reg = readl(mmio + MV_PCI_MODE);
	if ((reg & MV_PCI_MODE_MASK) == 0)
		return 0;	/* conventional PCI mode */
	return 1;	/* chip is in PCI-X mode */
}

static int mv_pci_cut_through_okay(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (!mv_in_pcix_mode(host)) {
		reg = readl(mmio + MV_PCI_COMMAND);
		if (reg & MV_PCI_COMMAND_MRDTRIG)
			return 0; /* not okay */
	}
	return 1; /* okay */
}

static void mv_60x1b2_errata_pci7(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	/* workaround for 60x1-B2 errata PCI#7 */
	if (mv_in_pcix_mode(host)) {
		u32 reg = readl(mmio + MV_PCI_COMMAND);
		writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
	}
}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			mv_60x1b2_errata_pci7(host);
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
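		/*
		 * Worked example (illustration only): a 500 GB drive of
		 * 976773168 sectors keeps its RAID metadata at
		 * 976773168 & ~0xfffff = 976224256, i.e. the capacity
		 * rounded down to a 0x100000-sector (512 MiB) boundary.
		 */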
		/* fall through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			hp_flags |= MV_HP_CUT_THROUGH;

		switch (pdev->revision) {
		case 0x2: /* Rev.B0: the first/only public release */
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		if (soc_is_65n(hpriv))
			hpriv->ops = &mv_soc_65n_ops;
		else
			hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
			MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_offset	= PCIE_IRQ_CAUSE;
		hpriv->irq_mask_offset	= PCIE_IRQ_MASK;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_offset	= PCI_IRQ_CAUSE;
		hpriv->irq_mask_offset	= PCI_IRQ_MASK;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, hpriv->board_idx);
	if (rc)
		goto done;

	if (IS_SOC(hpriv)) {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK;
	} else {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK;
	}

	/* initialize shadow irq mask with register's value */
	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);

	/* global interrupt mask: 0 == mask everything */
	mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		if (hpriv->ops->read_preamp)
			hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG),
			readl(hc_mmio + HC_IRQ_CAUSE));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE);
	}

	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_offset);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
	}

	/*
	 * enable only global host interrupts for now.
	 * The per-port interrupts get done later as ports are set up.
	 */
	mv_set_main_irq_mask(host, 0, PCI_ERR);
	mv_set_irq_coalescing(host, irq_coalescing_io_count,
			      irq_coalescing_usecs);
done:
	return rc;
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
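
/*
 * Note on lifetime (dmam_* API behavior): these pools are devres-managed,
 * so they are torn down automatically when the device goes away. That is
 * why there is deliberately no matching "destroy pools" step in the error
 * paths above or in the driver's remove routines.
 */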

static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}
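
/*
 * WINDOW_CTRL encoding example (illustration only): a 256 MB chip select
 * yields (0x10000000 - 1) & 0xffff0000 = 0x0fff0000 for the size field,
 * with the mbus attribute in bits 15:8, the DRAM target id in bits 7:4,
 * and bit 0 set to enable the window.
 */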

/**
 *      mv_platform_probe - handle a positive probe of an SoC Marvell
 *      host
 *      @pdev: platform device found
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;
	hpriv->board_idx = chip_soc;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   resource_size(res));
	if (!hpriv->base)
		return -ENOMEM;
	hpriv->base -= SATAHC0_REG_BASE;

#if defined(CONFIG_HAVE_CLK)
	hpriv->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(hpriv->clk))
		dev_notice(&pdev->dev, "cannot get clkdev\n");
	else
		clk_enable(hpriv->clk);
#endif

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		goto err;

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		goto err;

	dev_info(&pdev->dev, "slots %u ports %d\n",
		 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
err:
#if defined(CONFIG_HAVE_CLK)
	if (!IS_ERR(hpriv->clk)) {
		clk_disable(hpriv->clk);
		clk_put(hpriv->clk);
	}
#endif

	return rc;
}

/*
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged. Perform the needed
 * cleanup. Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);
#if defined(CONFIG_HAVE_CLK)
	struct mv_host_priv *hpriv = host->private_data;
#endif
	ata_host_detach(host);

#if defined(CONFIG_HAVE_CLK)
	if (!IS_ERR(hpriv->clk)) {
		clk_disable(hpriv->clk);
		clk_put(hpriv->clk);
	}
#endif
	return 0;
}

#ifdef CONFIG_PM
static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	if (host)
		return ata_host_suspend(host, state);
	else
		return 0;
}

static int mv_platform_resume(struct platform_device *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int ret;

	if (host) {
		struct mv_host_priv *hpriv = host->private_data;
		const struct mv_sata_platform_data *mv_platform_data =
			pdev->dev.platform_data;
		/*
		 * (Re-)program MBUS remapping windows if we are asked to.
		 */
		if (mv_platform_data->dram != NULL)
			mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

		/* initialize adapter */
		ret = mv_init_host(host);
		if (ret) {
			printk(KERN_ERR DRV_NAME ": Error during HW init\n");
			return ret;
		}
		ata_host_resume(host);
	}

	return 0;
}
#else
#define mv_platform_suspend NULL
#define mv_platform_resume NULL
#endif

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.suspend	= mv_platform_suspend,
	.resume		= mv_platform_resume,
	.driver		= {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};


#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif


static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= mv_pci_device_resume,
#endif

};

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_err(&pdev->dev,
					"64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
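
/*
 * Summary of the DMA-mask ladder above (behavior as coded, not a spec
 * requirement): prefer 64-bit streaming plus 64-bit coherent masks, fall
 * back to 64-bit streaming with 32-bit coherent, and finally to fully
 * 32-bit DMA; only if the 32-bit masks are also rejected does the probe
 * fail.
 */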

/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to work around
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, port, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;
	hpriv->board_idx = board_idx;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
		unsigned int offset = port_mmio - hpriv->base;

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		return rc;

	/* Enable message-signaled interrupts, if requested */
	if (msi && pci_enable_msi(pdev) == 0)
		hpriv->hp_flags |= MV_HP_FLAG_MSI;

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}

#ifdef CONFIG_PM
static int mv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		return rc;

	ata_host_resume(host);

	return 0;
}
#endif
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(mv_init);
module_exit(mv_exit);