/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember that a couple of workarounds (one related to
  PCI-X) are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate).

  7) Develop a low-power-consumption strategy, and implement it.

  8) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  9) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  10) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR = 0,	/* offset 0x10: memory space */
	MV_IO_BAR = 2,		/* offset 0x18: IO space */
	MV_MISC_BAR = 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ = 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ = 0x2000,	/* 8KB */

	MV_PCI_REG_BASE = 0,
	MV_IRQ_COAL_REG_BASE = 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE = 0x20000,
	MV_FLASH_CTL = 0x1046c,
	MV_GPIO_PORT_CTL = 0x104f0,
	MV_RESET_CFG = 0x180d8,

	MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH = 32,
	MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT = 256,
	MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),

	MV_PORTS_PER_HC = 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT = 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK = 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC = (1 << 30),		/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE = (1 << 29),	/* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC = (1 << 28),

	MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
			  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ = (1 << 0),
	CRQB_TAG_SHIFT = 1,
	CRQB_IOID_SHIFT = 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT = 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT = 8,
	CRQB_CMD_CS = (0x2 << 11),
	CRQB_CMD_LAST = (1 << 15),

	CRPB_FLAG_STATUS_SHIFT = 8,
	CRPB_IOID_SHIFT_6 = 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7 = 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL = (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS = 0xc00,

	PCI_MAIN_CMD_STS_OFS = 0xd30,
	STOP_PCI_MASTER = (1 << 2),
	PCI_MASTER_EMPTY = (1 << 3),
	GLOB_SFT_RST = (1 << 4),

	MV_PCI_MODE = 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
	MV_PCI_DISC_TIMER = 0xd04,
	MV_PCI_MSI_TRIGGER = 0xc38,
	MV_PCI_SERR_MASK = 0xc28,
	MV_PCI_XBAR_TMOUT = 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
	MV_PCI_ERR_ATTRIBUTE = 0x1d48,
	MV_PCI_ERR_COMMAND = 0x1d50,

	PCI_IRQ_CAUSE_OFS = 0x1d58,
	PCI_IRQ_MASK_OFS = 0x1d5c,
	PCI_UNMASK_ALL_IRQS = 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS = 0x1900,
	PCIE_IRQ_MASK_OFS = 0x1910,
	PCIE_UNMASK_ALL_IRQS = 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	HC_MAIN_IRQ_MASK_OFS = 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR = (1 << 0),		/* shift by port # */
	PORT0_DONE = (1 << 1),		/* shift by port # */
	HC0_IRQ_PEND = 0x1ff,		/* bits 0-8 = HC0's ports */
	HC_SHIFT = 9,			/* bits 9-17 = HC1's ports */
	PCI_ERR = (1 << 18),
	TRAN_LO_DONE = (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE = (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE = (1 << 8),
	PORTS_4_7_COAL_DONE = (1 << 17),
	PORTS_0_7_COAL_DONE = (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT = (1 << 22),
	SELF_INT = (1 << 23),
	TWSI_INT = (1 << 24),
	HC_MAIN_RSVD = (0x7f << 25),		/* bits 31-25 */
	HC_MAIN_RSVD_5 = (0x1fff << 19),	/* bits 31-19 */
	HC_MAIN_RSVD_SOC = (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
			       PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
			       HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				 HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS = 0,

	HC_IRQ_CAUSE_OFS = 0x14,
	CRPB_DMA_DONE = (1 << 0),	/* shift by port # */
	HC_IRQ_COAL = (1 << 4),		/* IRQ coalescing */
	DEV_IRQ = (1 << 8),		/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS = 0x100,
	SHD_CTL_AST_OFS = 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS = 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS = 0x350,
	SATA_FIS_IRQ_CAUSE_OFS = 0x364,
	PHY_MODE3 = 0x310,
	PHY_MODE4 = 0x314,
	PHY_MODE2 = 0x330,
	MV5_PHY_MODE = 0x74,
	MV5_LT_MODE = 0x30,
	MV5_PHY_CTL = 0x0C,
	SATA_INTERFACE_CTL = 0x050,

	MV_M2_PREAMP_MASK = 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS = 0,
	EDMA_CFG_Q_DEPTH = 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ = (1 << 5),		/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT = (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN = (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
	EDMA_ERR_IRQ_MASK_OFS = 0xc,
	EDMA_ERR_D_PAR = (1 << 0),		/* UDMA data parity err */
	EDMA_ERR_PRD_PAR = (1 << 1),		/* UDMA PRD parity err */
	EDMA_ERR_DEV = (1 << 2),		/* device error */
	EDMA_ERR_DEV_DCON = (1 << 3),		/* device disconnect */
	EDMA_ERR_DEV_CON = (1 << 4),		/* device connected */
	EDMA_ERR_SERR = (1 << 5),		/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS = (1 << 7),		/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5 = (1 << 8),		/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC = (1 << 8),		/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7 = (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR = (1 << 9),		/* CRQB parity error */
	EDMA_ERR_CRPB_PAR = (1 << 10),		/* CRPB parity error */
	EDMA_ERR_INTRL_PAR = (1 << 11),		/* internal parity error */
	EDMA_ERR_IORDY = (1 << 12),		/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX = (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX = (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX = (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO = (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5 = (1 << 5),
	EDMA_ERR_UNDERRUN_5 = (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
				 EDMA_ERR_LNK_CTRL_RX_1 |
				 EDMA_ERR_LNK_CTRL_RX_3 |
				 EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
			 EDMA_ERR_PRD_PAR |
			 EDMA_ERR_DEV_DCON |
			 EDMA_ERR_DEV_CON |
			 EDMA_ERR_SERR |
			 EDMA_ERR_SELF_DIS |
			 EDMA_ERR_CRQB_PAR |
			 EDMA_ERR_CRPB_PAR |
			 EDMA_ERR_INTRL_PAR |
			 EDMA_ERR_IORDY |
			 EDMA_ERR_LNK_CTRL_RX_2 |
			 EDMA_ERR_LNK_DATA_RX |
			 EDMA_ERR_LNK_DATA_TX |
			 EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
			   EDMA_ERR_PRD_PAR |
			   EDMA_ERR_DEV_DCON |
			   EDMA_ERR_DEV_CON |
			   EDMA_ERR_OVERRUN_5 |
			   EDMA_ERR_UNDERRUN_5 |
			   EDMA_ERR_SELF_DIS_5 |
			   EDMA_ERR_CRQB_PAR |
			   EDMA_ERR_CRPB_PAR |
			   EDMA_ERR_INTRL_PAR |
			   EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS = 0x10,
	EDMA_REQ_Q_IN_PTR_OFS = 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
	EDMA_REQ_Q_PTR_SHIFT = 5,

	EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS = 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS = 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT = 3,

	EDMA_CMD_OFS = 0x28,		/* EDMA command register */
	EDMA_EN = (1 << 0),		/* enable EDMA */
	EDMA_DS = (1 << 1),		/* disable EDMA; self-negated */
	ATA_RST = (1 << 2),		/* reset trans/link/phy */

	EDMA_IORDY_TMOUT = 0x34,
	EDMA_ARB_CFG = 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI = (1 << 0),
	MV_HP_ERRATA_50XXB0 = (1 << 1),
	MV_HP_ERRATA_50XXB2 = (1 << 2),
	MV_HP_ERRATA_60X1B2 = (1 << 3),
	MV_HP_ERRATA_60X1C0 = (1 << 4),
	MV_HP_ERRATA_XX42A0 = (1 << 5),
	MV_HP_GEN_I = (1 << 6),		/* Generation I: 50xx */
	MV_HP_GEN_II = (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE = (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE = (1 << 9),		/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN = (1 << 0),		/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN = (1 << 1),		/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET = (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY = 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32 sg_addr;
	__le32 sg_addr_hi;
	__le16 ctrl_flags;
	__le16 ata_cmd[11];
};

struct mv_crqb_iie {
	__le32 addr;
	__le32 addr_hi;
	__le32 flags;
	__le32 len;
	__le32 ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16 id;
	__le16 flags;
	__le32 tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32 addr;
	__le32 flags_size;
	__le32 addr_hi;
	__le32 reserved;
};

struct mv_port_priv {
	struct mv_crqb *crqb;
	dma_addr_t crqb_dma;
	struct mv_crpb *crpb;
	dma_addr_t crpb_dma;
	struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int req_idx;
	unsigned int resp_idx;

	u32 pp_flags;
};

struct mv_port_signal {
	u32 amps;
	u32 pre;
};

struct mv_host_priv {
	u32 hp_flags;
	struct mv_port_signal signal[8];
	const struct mv_hw_ops *ops;
	int n_ports;
	void __iomem *base;
	void __iomem *main_cause_reg_addr;
	void __iomem *main_mask_reg_addr;
	u32 irq_cause_ofs;
	u32 irq_mask_ofs;
	u32 unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool *crqb_pool;
	struct dma_pool *crpb_pool;
	struct dma_pool *sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue = MV_MAX_Q_DEPTH - 1,
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
};

static const struct ata_port_operations mv5_ops = {
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,
	.data_xfer = ata_data_xfer,

	.irq_clear = ata_noop_irq_clear,
	.irq_on = ata_irq_on,

	.error_handler = mv_error_handler,
	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,

	.scr_read = mv5_scr_read,
	.scr_write = mv5_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.dev_config = mv6_dev_config,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,
	.data_xfer = ata_data_xfer,

	.irq_clear = ata_noop_irq_clear,
	.irq_on = ata_irq_on,

	.error_handler = mv_error_handler,
	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,
	.qc_defer = ata_std_qc_defer,

	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.qc_prep = mv_qc_prep_iie,
	.qc_issue = mv_qc_issue,
	.data_xfer = ata_data_xfer,

	.irq_clear = ata_noop_irq_clear,
	.irq_on = ata_irq_on,

	.error_handler = mv_error_handler,
	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,
	.qc_defer = ata_std_qc_defer,

	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags = MV_COMMON_FLAGS,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_508x */
		.flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_5080 */
		.flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_604x */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_NCQ,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_608x */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_6042 */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_NCQ,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_NCQ,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata = mv5_phy_errata,
	.enable_leds = mv5_enable_leds,
	.read_preamp = mv5_read_preamp,
	.reset_hc = mv5_reset_hc,
	.reset_flash = mv5_reset_flash,
	.reset_bus = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv6_enable_leds,
	.read_preamp = mv6_read_preamp,
	.reset_hc = mv6_reset_hc,
	.reset_flash = mv6_reset_flash,
	.reset_bus = mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv_soc_enable_leds,
	.read_preamp = mv_soc_read_preamp,
	.reset_hc = mv_soc_reset_hc,
	.reset_flash = mv_soc_reset_flash,
	.reset_bus = mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

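/*
 * Worked example (illustrative only, not part of the driver logic): with
 * MV_MAX_Q_DEPTH = 32 and EDMA_REQ_Q_PTR_SHIFT = 5, a software request
 * index of pp->req_idx = 3 gives index = (3 & 0x1f) << 5 = 0x60, i.e.
 * slot 3 placed in the five bits just above the low address bits.
 * Because the CRQB ring is 1KB aligned, its low 10 address bits are zero,
 * so OR-ing the masked base (EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00) with
 * the index never collides with the pointer field.
 */
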
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}

/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears. */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;	/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ)
		if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
}

static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}

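/*
 * Worked example (illustrative only, not part of the driver): a segment
 * that crosses a 64K boundary is emitted as two ePRDs by the loop above,
 * which is why the SHTs advertise only MV_MAX_SG_CT / 2 entries.  For a
 * hypothetical 12KB segment at bus address 0x1fffff000:
 *
 *	1st ePRD: addr_hi 0x1, addr 0xfffff000, length 0x1000
 *	2nd ePRD: addr_hi 0x2, addr 0x00000000, length 0x2000
 *
 * If this were the final segment of the command, the second entry would
 * also carry EPRD_FLAG_END_OF_TBL.
 */
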
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		  (last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

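/*
 * Worked example (illustrative only, not part of the driver): packing the
 * final command word of a READ DMA (0xc8), and assuming libata's usual
 * register numbering where ATA_REG_CMD is 0x07, gives
 *
 *	0xc8 | (0x07 << 8) | CRQB_CMD_CS | CRQB_CMD_LAST
 *	     = 0x00c8 | 0x0700 | 0x1000 | 0x8000 = 0x97c8
 *
 * i.e. the data byte in the low 8 bits, the shadow-register address just
 * above it, plus the control/status and last-entry marker bits.
 */
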
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected command, if any
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

05b308e1
BR
1637/**
1638 * mv_host_intr - Handle all interrupts on the given host controller
cca3974e 1639 * @host: host specific structure
05b308e1
BR
1640 * @relevant: port error bits relevant to this host controller
1641 * @hc: which host controller we're to look at
1642 *
 1643 * Read, then write-clear, the HC interrupt status, then walk each
1644 * port connected to the HC and see if it needs servicing. Port
1645 * success ints are reported in the HC interrupt status reg, the
1646 * port error ints are reported in the higher level main
1647 * interrupt status register and thus are passed in via the
1648 * 'relevant' argument.
1649 *
1650 * LOCKING:
1651 * Inherited from caller.
1652 */
cca3974e 1653static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
20f733e7 1654{
f351b2d6
SB
1655 struct mv_host_priv *hpriv = host->private_data;
1656 void __iomem *mmio = hpriv->base;
20f733e7 1657 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
20f733e7 1658 u32 hc_irq_cause;
f351b2d6 1659 int port, port0, last_port;
20f733e7 1660
35177265 1661 if (hc == 0)
20f733e7 1662 port0 = 0;
35177265 1663 else
20f733e7 1664 port0 = MV_PORTS_PER_HC;
20f733e7 1665
f351b2d6
SB
1666 if (HAS_PCI(host))
1667 last_port = port0 + MV_PORTS_PER_HC;
1668 else
1669 last_port = port0 + hpriv->n_ports;
20f733e7
BR
1670 /* we'll need the HC success int register in most cases */
1671 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
bdd4ddde
JG
1672 if (!hc_irq_cause)
1673 return;
1674
1675 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
1676
1677 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
2dcb407e 1678 hc, relevant, hc_irq_cause);
20f733e7 1679
8f71efe2 1680 for (port = port0; port < last_port; port++) {
cca3974e 1681 struct ata_port *ap = host->ports[port];
8f71efe2 1682 struct mv_port_priv *pp;
bdd4ddde 1683 int have_err_bits, hard_port, shift;
55d8ca4f 1684
bdd4ddde 1685 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
a2c91a88
JG
1686 continue;
1687
8f71efe2
YL
1688 pp = ap->private_data;
1689
31961943 1690 shift = port << 1; /* (port * 2) */
20f733e7
BR
1691 if (port >= MV_PORTS_PER_HC) {
1692 shift++; /* skip bit 8 in the HC Main IRQ reg */
1693 }
bdd4ddde
JG
1694 have_err_bits = ((PORT0_ERR << shift) & relevant);
1695
1696 if (unlikely(have_err_bits)) {
1697 struct ata_queued_cmd *qc;
8b260248 1698
9af5c9c9 1699 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1700 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1701 continue;
1702
1703 mv_err_intr(ap, qc);
1704 continue;
1705 }
1706
1707 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1708
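		/* dispatch completions: EDMA responses when EDMA is active on this port, legacy PIO otherwise */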
1709 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1710 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1711 mv_intr_edma(ap);
1712 } else {
1713 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1714 mv_intr_pio(ap);
20f733e7
BR
1715 }
1716 }
1717 VPRINTK("EXIT\n");
1718}
1719
bdd4ddde
JG
1720static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1721{
02a121da 1722 struct mv_host_priv *hpriv = host->private_data;
bdd4ddde
JG
1723 struct ata_port *ap;
1724 struct ata_queued_cmd *qc;
1725 struct ata_eh_info *ehi;
1726 unsigned int i, err_mask, printed = 0;
1727 u32 err_cause;
1728
02a121da 1729 err_cause = readl(mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1730
1731 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1732 err_cause);
1733
1734 DPRINTK("All regs @ PCI error\n");
1735 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1736
02a121da 1737 writelfl(0, mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1738
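	/* a PCI error affects all ports: note the cause on every online link and freeze it for EH */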
1739 for (i = 0; i < host->n_ports; i++) {
1740 ap = host->ports[i];
936fd732 1741 if (!ata_link_offline(&ap->link)) {
9af5c9c9 1742 ehi = &ap->link.eh_info;
bdd4ddde
JG
1743 ata_ehi_clear_desc(ehi);
1744 if (!printed++)
1745 ata_ehi_push_desc(ehi,
1746 "PCI err cause 0x%08x", err_cause);
1747 err_mask = AC_ERR_HOST_BUS;
cf480626 1748 ehi->action = ATA_EH_RESET;
9af5c9c9 1749 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1750 if (qc)
1751 qc->err_mask |= err_mask;
1752 else
1753 ehi->err_mask |= err_mask;
1754
1755 ata_port_freeze(ap);
1756 }
1757 }
1758}
1759
05b308e1 1760/**
c5d3e45a 1761 * mv_interrupt - Main interrupt event handler
05b308e1
BR
1762 * @irq: unused
1763 * @dev_instance: private data; in this case the host structure
05b308e1
BR
1764 *
 1765 * Read the read-only register to determine if any host
 1766 * controllers have pending interrupts. If so, call the lower level
 1767 * routine to handle. Also check for PCI errors, which are only
1768 * reported here.
1769 *
8b260248 1770 * LOCKING:
cca3974e 1771 * This routine holds the host lock while processing pending
05b308e1
BR
1772 * interrupts.
1773 */
7d12e780 1774static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1775{
cca3974e 1776 struct ata_host *host = dev_instance;
f351b2d6 1777 struct mv_host_priv *hpriv = host->private_data;
20f733e7 1778 unsigned int hc, handled = 0, n_hcs;
f351b2d6 1779 void __iomem *mmio = hpriv->base;
646a4da5 1780 u32 irq_stat, irq_mask;
20f733e7 1781
646a4da5 1782 spin_lock(&host->lock);
f351b2d6
SB
1783
1784 irq_stat = readl(hpriv->main_cause_reg_addr);
1785 irq_mask = readl(hpriv->main_mask_reg_addr);
20f733e7
BR
1786
1787 /* check the cases where we either have nothing pending or have read
1788 * a bogus register value which can indicate HW removal or PCI fault
1789 */
646a4da5
ML
1790 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1791 goto out_unlock;
20f733e7 1792
cca3974e 1793 n_hcs = mv_get_hc_count(host->ports[0]->flags);
20f733e7 1794
7bb3c529 1795 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
bdd4ddde
JG
1796 mv_pci_error(host, mmio);
1797 handled = 1;
1798 goto out_unlock; /* skip all other HC irq handling */
1799 }
1800
20f733e7
BR
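	/* service each host controller that has pending bits in the main cause register */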
1801 for (hc = 0; hc < n_hcs; hc++) {
1802 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1803 if (relevant) {
cca3974e 1804 mv_host_intr(host, relevant, hc);
bdd4ddde 1805 handled = 1;
20f733e7
BR
1806 }
1807 }
615ab953 1808
bdd4ddde 1809out_unlock:
cca3974e 1810 spin_unlock(&host->lock);
20f733e7
BR
1811
1812 return IRQ_RETVAL(handled);
1813}
1814
c9d39130
JG
1815static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1816{
1817 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1818 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1819
1820 return hc_mmio + ofs;
1821}
1822
1823static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1824{
1825 unsigned int ofs;
1826
1827 switch (sc_reg_in) {
1828 case SCR_STATUS:
1829 case SCR_ERROR:
1830 case SCR_CONTROL:
1831 ofs = sc_reg_in * sizeof(u32);
1832 break;
1833 default:
1834 ofs = 0xffffffffU;
1835 break;
1836 }
1837 return ofs;
1838}
1839
da3dbb17 1840static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 1841{
f351b2d6
SB
1842 struct mv_host_priv *hpriv = ap->host->private_data;
1843 void __iomem *mmio = hpriv->base;
0d5ff566 1844 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1845 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1846
da3dbb17
TH
1847 if (ofs != 0xffffffffU) {
1848 *val = readl(addr + ofs);
1849 return 0;
1850 } else
1851 return -EINVAL;
c9d39130
JG
1852}
1853
da3dbb17 1854static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 1855{
f351b2d6
SB
1856 struct mv_host_priv *hpriv = ap->host->private_data;
1857 void __iomem *mmio = hpriv->base;
0d5ff566 1858 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1859 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1860
da3dbb17 1861 if (ofs != 0xffffffffU) {
0d5ff566 1862 writelfl(val, addr + ofs);
da3dbb17
TH
1863 return 0;
1864 } else
1865 return -EINVAL;
c9d39130
JG
1866}
1867
7bb3c529 1868static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
522479fb 1869{
7bb3c529 1870 struct pci_dev *pdev = to_pci_dev(host->dev);
522479fb
JG
1871 int early_5080;
1872
44c10138 1873 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
1874
1875 if (!early_5080) {
1876 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1877 tmp |= (1 << 0);
1878 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1879 }
1880
7bb3c529 1881 mv_reset_pci_bus(host, mmio);
522479fb
JG
1882}
1883
1884static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1885{
1886 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1887}
1888
47c2b677 1889static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
1890 void __iomem *mmio)
1891{
c9d39130
JG
1892 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1893 u32 tmp;
1894
1895 tmp = readl(phy_mmio + MV5_PHY_MODE);
1896
1897 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1898 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
1899}
1900
47c2b677 1901static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1902{
522479fb
JG
1903 u32 tmp;
1904
1905 writel(0, mmio + MV_GPIO_PORT_CTL);
1906
1907 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1908
1909 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1910 tmp |= ~(1 << 0);
1911 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
1912}
1913
2a47ce06
JG
1914static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1915 unsigned int port)
bca1c4eb 1916{
c9d39130
JG
1917 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1918 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1919 u32 tmp;
1920 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1921
1922 if (fix_apm_sq) {
1923 tmp = readl(phy_mmio + MV5_LT_MODE);
1924 tmp |= (1 << 19);
1925 writel(tmp, phy_mmio + MV5_LT_MODE);
1926
1927 tmp = readl(phy_mmio + MV5_PHY_CTL);
1928 tmp &= ~0x3;
1929 tmp |= 0x1;
1930 writel(tmp, phy_mmio + MV5_PHY_CTL);
1931 }
1932
1933 tmp = readl(phy_mmio + MV5_PHY_MODE);
1934 tmp &= ~mask;
1935 tmp |= hpriv->signal[port].pre;
1936 tmp |= hpriv->signal[port].amps;
1937 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
1938}
1939
c9d39130
JG
1940
1941#undef ZERO
1942#define ZERO(reg) writel(0, port_mmio + (reg))
1943static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1944 unsigned int port)
1945{
1946 void __iomem *port_mmio = mv_port_base(mmio, port);
1947
1948 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1949
1950 mv_channel_reset(hpriv, mmio, port);
1951
1952 ZERO(0x028); /* command */
1953 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1954 ZERO(0x004); /* timer */
1955 ZERO(0x008); /* irq err cause */
1956 ZERO(0x00c); /* irq err mask */
1957 ZERO(0x010); /* rq bah */
1958 ZERO(0x014); /* rq inp */
1959 ZERO(0x018); /* rq outp */
1960 ZERO(0x01c); /* respq bah */
1961 ZERO(0x024); /* respq outp */
1962 ZERO(0x020); /* respq inp */
1963 ZERO(0x02c); /* test control */
1964 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1965}
1966#undef ZERO
1967
1968#define ZERO(reg) writel(0, hc_mmio + (reg))
1969static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1970 unsigned int hc)
47c2b677 1971{
c9d39130
JG
1972 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1973 u32 tmp;
1974
1975 ZERO(0x00c);
1976 ZERO(0x010);
1977 ZERO(0x014);
1978 ZERO(0x018);
1979
1980 tmp = readl(hc_mmio + 0x20);
1981 tmp &= 0x1c1c1c1c;
1982 tmp |= 0x03030303;
1983 writel(tmp, hc_mmio + 0x20);
1984}
1985#undef ZERO
1986
1987static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1988 unsigned int n_hc)
1989{
1990 unsigned int hc, port;
1991
1992 for (hc = 0; hc < n_hc; hc++) {
1993 for (port = 0; port < MV_PORTS_PER_HC; port++)
1994 mv5_reset_hc_port(hpriv, mmio,
1995 (hc * MV_PORTS_PER_HC) + port);
1996
1997 mv5_reset_one_hc(hpriv, mmio, hc);
1998 }
1999
2000 return 0;
47c2b677
JG
2001}
2002
101ffae2
JG
2003#undef ZERO
2004#define ZERO(reg) writel(0, mmio + (reg))
7bb3c529 2005static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
101ffae2 2006{
02a121da 2007 struct mv_host_priv *hpriv = host->private_data;
101ffae2
JG
2008 u32 tmp;
2009
2010 tmp = readl(mmio + MV_PCI_MODE);
2011 tmp &= 0xff00ffff;
2012 writel(tmp, mmio + MV_PCI_MODE);
2013
2014 ZERO(MV_PCI_DISC_TIMER);
2015 ZERO(MV_PCI_MSI_TRIGGER);
2016 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2017 ZERO(HC_MAIN_IRQ_MASK_OFS);
2018 ZERO(MV_PCI_SERR_MASK);
02a121da
ML
2019 ZERO(hpriv->irq_cause_ofs);
2020 ZERO(hpriv->irq_mask_ofs);
101ffae2
JG
2021 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2022 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2023 ZERO(MV_PCI_ERR_ATTRIBUTE);
2024 ZERO(MV_PCI_ERR_COMMAND);
2025}
2026#undef ZERO
2027
2028static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2029{
2030 u32 tmp;
2031
2032 mv5_reset_flash(hpriv, mmio);
2033
2034 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2035 tmp &= 0x3;
2036 tmp |= (1 << 5) | (1 << 6);
2037 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2038}
2039
2040/**
2041 * mv6_reset_hc - Perform the 6xxx global soft reset
2042 * @mmio: base address of the HBA
2043 *
2044 * This routine only applies to 6xxx parts.
2045 *
2046 * LOCKING:
2047 * Inherited from caller.
2048 */
c9d39130
JG
2049static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2050 unsigned int n_hc)
101ffae2
JG
2051{
2052 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2053 int i, rc = 0;
2054 u32 t;
2055
 2056	/* Follow the procedure defined in the PCI "main command and status
2057 * register" table.
2058 */
2059 t = readl(reg);
2060 writel(t | STOP_PCI_MASTER, reg);
2061
2062 for (i = 0; i < 1000; i++) {
2063 udelay(1);
2064 t = readl(reg);
2dcb407e 2065 if (PCI_MASTER_EMPTY & t)
101ffae2 2066 break;
101ffae2
JG
2067 }
2068 if (!(PCI_MASTER_EMPTY & t)) {
2069 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2070 rc = 1;
2071 goto done;
2072 }
2073
2074 /* set reset */
2075 i = 5;
2076 do {
2077 writel(t | GLOB_SFT_RST, reg);
2078 t = readl(reg);
2079 udelay(1);
2080 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2081
2082 if (!(GLOB_SFT_RST & t)) {
2083 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2084 rc = 1;
2085 goto done;
2086 }
2087
2088 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2089 i = 5;
2090 do {
2091 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2092 t = readl(reg);
2093 udelay(1);
2094 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2095
2096 if (GLOB_SFT_RST & t) {
2097 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2098 rc = 1;
2099 }
2100done:
2101 return rc;
2102}
2103
47c2b677 2104static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
2105 void __iomem *mmio)
2106{
2107 void __iomem *port_mmio;
2108 u32 tmp;
2109
ba3fe8fb
JG
2110 tmp = readl(mmio + MV_RESET_CFG);
2111 if ((tmp & (1 << 0)) == 0) {
47c2b677 2112 hpriv->signal[idx].amps = 0x7 << 8;
ba3fe8fb
JG
2113 hpriv->signal[idx].pre = 0x1 << 5;
2114 return;
2115 }
2116
2117 port_mmio = mv_port_base(mmio, idx);
2118 tmp = readl(port_mmio + PHY_MODE2);
2119
2120 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2121 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2122}
2123
47c2b677 2124static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2125{
47c2b677 2126 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
ba3fe8fb
JG
2127}
2128
c9d39130 2129static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2130 unsigned int port)
bca1c4eb 2131{
c9d39130
JG
2132 void __iomem *port_mmio = mv_port_base(mmio, port);
2133
bca1c4eb 2134 u32 hp_flags = hpriv->hp_flags;
47c2b677
JG
2135 int fix_phy_mode2 =
2136 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2137 int fix_phy_mode4 =
47c2b677
JG
2138 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2139 u32 m2, tmp;
2140
2141 if (fix_phy_mode2) {
2142 m2 = readl(port_mmio + PHY_MODE2);
2143 m2 &= ~(1 << 16);
2144 m2 |= (1 << 31);
2145 writel(m2, port_mmio + PHY_MODE2);
2146
2147 udelay(200);
2148
2149 m2 = readl(port_mmio + PHY_MODE2);
2150 m2 &= ~((1 << 16) | (1 << 31));
2151 writel(m2, port_mmio + PHY_MODE2);
2152
2153 udelay(200);
2154 }
2155
2156 /* who knows what this magic does */
2157 tmp = readl(port_mmio + PHY_MODE3);
2158 tmp &= ~0x7F800000;
2159 tmp |= 0x2A800000;
2160 writel(tmp, port_mmio + PHY_MODE3);
bca1c4eb
JG
2161
2162 if (fix_phy_mode4) {
47c2b677 2163 u32 m4;
bca1c4eb
JG
2164
2165 m4 = readl(port_mmio + PHY_MODE4);
47c2b677
JG
2166
2167 if (hp_flags & MV_HP_ERRATA_60X1B2)
2168 tmp = readl(port_mmio + 0x310);
bca1c4eb
JG
2169
2170 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2171
2172 writel(m4, port_mmio + PHY_MODE4);
47c2b677
JG
2173
2174 if (hp_flags & MV_HP_ERRATA_60X1B2)
2175 writel(tmp, port_mmio + 0x310);
bca1c4eb
JG
2176 }
2177
2178 /* Revert values of pre-emphasis and signal amps to the saved ones */
2179 m2 = readl(port_mmio + PHY_MODE2);
2180
2181 m2 &= ~MV_M2_PREAMP_MASK;
2a47ce06
JG
2182 m2 |= hpriv->signal[port].amps;
2183 m2 |= hpriv->signal[port].pre;
47c2b677 2184 m2 &= ~(1 << 16);
bca1c4eb 2185
e4e7b892
JG
2186 /* according to mvSata 3.6.1, some IIE values are fixed */
2187 if (IS_GEN_IIE(hpriv)) {
2188 m2 &= ~0xC30FF01F;
2189 m2 |= 0x0000900F;
2190 }
2191
bca1c4eb
JG
2192 writel(m2, port_mmio + PHY_MODE2);
2193}
2194
f351b2d6
SB
 2195 /* TODO: use the generic LED interface to configure the SATA Presence */
 2196 /* & Activity LEDs on the board */
2197static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2198 void __iomem *mmio)
2199{
2200 return;
2201}
2202
2203static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2204 void __iomem *mmio)
2205{
2206 void __iomem *port_mmio;
2207 u32 tmp;
2208
2209 port_mmio = mv_port_base(mmio, idx);
2210 tmp = readl(port_mmio + PHY_MODE2);
2211
2212 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2213 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2214}
2215
2216#undef ZERO
2217#define ZERO(reg) writel(0, port_mmio + (reg))
2218static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2219 void __iomem *mmio, unsigned int port)
2220{
2221 void __iomem *port_mmio = mv_port_base(mmio, port);
2222
2223 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2224
2225 mv_channel_reset(hpriv, mmio, port);
2226
2227 ZERO(0x028); /* command */
2228 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2229 ZERO(0x004); /* timer */
2230 ZERO(0x008); /* irq err cause */
2231 ZERO(0x00c); /* irq err mask */
2232 ZERO(0x010); /* rq bah */
2233 ZERO(0x014); /* rq inp */
2234 ZERO(0x018); /* rq outp */
2235 ZERO(0x01c); /* respq bah */
2236 ZERO(0x024); /* respq outp */
2237 ZERO(0x020); /* respq inp */
2238 ZERO(0x02c); /* test control */
2239 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2240}
2241
2242#undef ZERO
2243
2244#define ZERO(reg) writel(0, hc_mmio + (reg))
2245static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2246 void __iomem *mmio)
2247{
2248 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2249
2250 ZERO(0x00c);
2251 ZERO(0x010);
2252 ZERO(0x014);
2253
2254}
2255
2256#undef ZERO
2257
2258static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2259 void __iomem *mmio, unsigned int n_hc)
2260{
2261 unsigned int port;
2262
2263 for (port = 0; port < hpriv->n_ports; port++)
2264 mv_soc_reset_hc_port(hpriv, mmio, port);
2265
2266 mv_soc_reset_one_hc(hpriv, mmio);
2267
2268 return 0;
2269}
2270
2271static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2272 void __iomem *mmio)
2273{
2274 return;
2275}
2276
2277static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2278{
2279 return;
2280}
2281
c9d39130
JG
2282static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2283 unsigned int port_no)
2284{
2285 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2286
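	/* assert the EDMA/ATA reset bit; it is released again below after a short propagation delay */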
2287 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2288
ee9ccdf7 2289 if (IS_GEN_II(hpriv)) {
c9d39130 2290 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2291 ifctl |= (1 << 7); /* enable gen2i speed */
2292 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
c9d39130
JG
2293 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2294 }
2295
2296 udelay(25); /* allow reset propagation */
2297
2298 /* Spec never mentions clearing the bit. Marvell's driver does
2299 * clear the bit, however.
2300 */
2301 writelfl(0, port_mmio + EDMA_CMD_OFS);
2302
2303 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2304
ee9ccdf7 2305 if (IS_GEN_I(hpriv))
c9d39130
JG
2306 mdelay(1);
2307}
2308
05b308e1 2309/**
bdd4ddde 2310 * mv_phy_reset - Perform eDMA reset followed by COMRESET
05b308e1
BR
2311 * @ap: ATA channel to manipulate
2312 *
2313 * Part of this is taken from __sata_phy_reset and modified to
2314 * not sleep since this routine gets called from interrupt level.
2315 *
2316 * LOCKING:
 2317 * Inherited from caller. This is coded to be safe to call at
2318 * interrupt level, i.e. it does not sleep.
31961943 2319 */
bdd4ddde
JG
2320static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2321 unsigned long deadline)
20f733e7 2322{
095fec88 2323 struct mv_port_priv *pp = ap->private_data;
cca3974e 2324 struct mv_host_priv *hpriv = ap->host->private_data;
20f733e7 2325 void __iomem *port_mmio = mv_ap_base(ap);
22374677
JG
2326 int retry = 5;
2327 u32 sstatus;
20f733e7
BR
2328
2329 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2330
da3dbb17
TH
2331#ifdef DEBUG
2332 {
2333 u32 sstatus, serror, scontrol;
2334
2335 mv_scr_read(ap, SCR_STATUS, &sstatus);
2336 mv_scr_read(ap, SCR_ERROR, &serror);
2337 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2338 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2d79ab8f 2339 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
da3dbb17
TH
2340 }
2341#endif
20f733e7 2342
22374677
JG
2343 /* Issue COMRESET via SControl */
2344comreset_retry:
936fd732 2345 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
bdd4ddde 2346 msleep(1);
22374677 2347
936fd732 2348 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
bdd4ddde 2349 msleep(20);
22374677 2350
31961943 2351 do {
936fd732 2352 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
62f1d0e6 2353 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
31961943 2354 break;
22374677 2355
bdd4ddde 2356 msleep(1);
c5d3e45a 2357 } while (time_before(jiffies, deadline));
20f733e7 2358
22374677 2359 /* work around errata */
ee9ccdf7 2360 if (IS_GEN_II(hpriv) &&
22374677
JG
2361 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2362 (retry-- > 0))
2363 goto comreset_retry;
095fec88 2364
da3dbb17
TH
2365#ifdef DEBUG
2366 {
2367 u32 sstatus, serror, scontrol;
2368
2369 mv_scr_read(ap, SCR_STATUS, &sstatus);
2370 mv_scr_read(ap, SCR_ERROR, &serror);
2371 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2372 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2373 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2374 }
2375#endif
31961943 2376
936fd732 2377 if (ata_link_offline(&ap->link)) {
bdd4ddde 2378 *class = ATA_DEV_NONE;
20f733e7
BR
2379 return;
2380 }
2381
22374677
JG
 2382	/* even after SStatus reflects that the device is ready,
 2383	 * it seems to take a while for the link to be fully
2384 * established (and thus Status no longer 0x80/0x7F),
2385 * so we poll a bit for that, here.
2386 */
2387 retry = 20;
2388 while (1) {
2389 u8 drv_stat = ata_check_status(ap);
2390 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2391 break;
bdd4ddde 2392 msleep(500);
22374677
JG
2393 if (retry-- <= 0)
2394 break;
bdd4ddde
JG
2395 if (time_after(jiffies, deadline))
2396 break;
22374677
JG
2397 }
2398
bdd4ddde
JG
2399 /* FIXME: if we passed the deadline, the following
2400 * code probably produces an invalid result
2401 */
20f733e7 2402
bdd4ddde 2403 /* finally, read device signature from TF registers */
3f19859e 2404 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
095fec88
JG
2405
2406 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2407
bdd4ddde 2408 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
095fec88 2409
bca1c4eb 2410 VPRINTK("EXIT\n");
20f733e7
BR
2411}
2412
cc0680a5 2413static int mv_prereset(struct ata_link *link, unsigned long deadline)
22374677 2414{
cc0680a5 2415 struct ata_port *ap = link->ap;
bdd4ddde 2416 struct mv_port_priv *pp = ap->private_data;
0ea9e179 2417
cf480626 2418 mv_stop_dma(ap);
bdd4ddde 2419
cf480626 2420 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
bdd4ddde 2421 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
bdd4ddde 2422
cf480626 2423 return 0;
22374677
JG
2424}
2425
cc0680a5 2426static int mv_hardreset(struct ata_link *link, unsigned int *class,
bdd4ddde 2427 unsigned long deadline)
31961943 2428{
cc0680a5 2429 struct ata_port *ap = link->ap;
bdd4ddde 2430 struct mv_host_priv *hpriv = ap->host->private_data;
f351b2d6 2431 void __iomem *mmio = hpriv->base;
31961943 2432
bdd4ddde 2433 mv_stop_dma(ap);
31961943 2434
bdd4ddde 2435 mv_channel_reset(hpriv, mmio, ap->port_no);
31961943 2436
bdd4ddde
JG
2437 mv_phy_reset(ap, class, deadline);
2438
2439 return 0;
2440}
2441
cc0680a5 2442static void mv_postreset(struct ata_link *link, unsigned int *classes)
bdd4ddde 2443{
cc0680a5 2444 struct ata_port *ap = link->ap;
bdd4ddde
JG
2445 u32 serr;
2446
2447 /* print link status */
cc0680a5 2448 sata_print_link_status(link);
31961943 2449
bdd4ddde 2450 /* clear SError */
cc0680a5
TH
2451 sata_scr_read(link, SCR_ERROR, &serr);
2452 sata_scr_write_flush(link, SCR_ERROR, serr);
bdd4ddde
JG
2453
2454 /* bail out if no device is present */
2455 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2456 DPRINTK("EXIT, no device\n");
2457 return;
9b358e30 2458 }
bdd4ddde
JG
2459
2460 /* set up device control */
2461 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2462}
2463
2464static void mv_error_handler(struct ata_port *ap)
2465{
2466 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2467 mv_hardreset, mv_postreset);
2468}
2469
bdd4ddde
JG
2470static void mv_eh_freeze(struct ata_port *ap)
2471{
f351b2d6 2472 struct mv_host_priv *hpriv = ap->host->private_data;
bdd4ddde
JG
2473 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2474 u32 tmp, mask;
2475 unsigned int shift;
2476
2477 /* FIXME: handle coalescing completion events properly */
2478
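	/* each port owns an error bit and a done bit in the main IRQ mask; ports on the second HC skip one extra bit */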
2479 shift = ap->port_no * 2;
2480 if (hc > 0)
2481 shift++;
2482
2483 mask = 0x3 << shift;
2484
2485 /* disable assertion of portN err, done events */
f351b2d6
SB
2486 tmp = readl(hpriv->main_mask_reg_addr);
2487 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
bdd4ddde
JG
2488}
2489
2490static void mv_eh_thaw(struct ata_port *ap)
2491{
f351b2d6
SB
2492 struct mv_host_priv *hpriv = ap->host->private_data;
2493 void __iomem *mmio = hpriv->base;
bdd4ddde
JG
2494 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2495 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2496 void __iomem *port_mmio = mv_ap_base(ap);
2497 u32 tmp, mask, hc_irq_cause;
2498 unsigned int shift, hc_port_no = ap->port_no;
2499
2500 /* FIXME: handle coalescing completion events properly */
2501
2502 shift = ap->port_no * 2;
2503 if (hc > 0) {
2504 shift++;
2505 hc_port_no -= 4;
2506 }
2507
2508 mask = 0x3 << shift;
2509
2510 /* clear EDMA errors on this port */
2511 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2512
2513 /* clear pending irq events */
2514 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2515 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2516 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2517 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2518
2519 /* enable assertion of portN err, done events */
f351b2d6
SB
2520 tmp = readl(hpriv->main_mask_reg_addr);
2521 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
31961943
BR
2522}
2523
05b308e1
BR
2524/**
2525 * mv_port_init - Perform some early initialization on a single port.
2526 * @port: libata data structure storing shadow register addresses
2527 * @port_mmio: base address of the port
2528 *
2529 * Initialize shadow register mmio addresses, clear outstanding
2530 * interrupts on the port, and unmask interrupts for the future
2531 * start of the port.
2532 *
2533 * LOCKING:
2534 * Inherited from caller.
2535 */
31961943 2536static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2537{
0d5ff566 2538 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
31961943
BR
2539 unsigned serr_ofs;
2540
8b260248 2541 /* PIO related setup
31961943
BR
2542 */
2543 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2544 port->error_addr =
31961943
BR
2545 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2546 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2547 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2548 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2549 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2550 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2551 port->status_addr =
31961943
BR
2552 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2553 /* special case: control/altstatus doesn't have ATA_REG_ address */
2554 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2555
2556 /* unused: */
8d9db2d2 2557 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
20f733e7 2558
31961943
BR
2559 /* Clear any currently outstanding port interrupt conditions */
2560 serr_ofs = mv_scr_offset(SCR_ERROR);
2561 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2562 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2563
646a4da5
ML
2564 /* unmask all non-transient EDMA error interrupts */
2565 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2566
8b260248 2567 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
31961943
BR
2568 readl(port_mmio + EDMA_CFG_OFS),
2569 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2570 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
20f733e7
BR
2571}
2572
4447d351 2573static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2574{
4447d351
TH
2575 struct pci_dev *pdev = to_pci_dev(host->dev);
2576 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb
JG
2577 u32 hp_flags = hpriv->hp_flags;
2578
5796d1c4 2579 switch (board_idx) {
47c2b677
JG
2580 case chip_5080:
2581 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2582 hp_flags |= MV_HP_GEN_I;
47c2b677 2583
44c10138 2584 switch (pdev->revision) {
47c2b677
JG
2585 case 0x1:
2586 hp_flags |= MV_HP_ERRATA_50XXB0;
2587 break;
2588 case 0x3:
2589 hp_flags |= MV_HP_ERRATA_50XXB2;
2590 break;
2591 default:
2592 dev_printk(KERN_WARNING, &pdev->dev,
2593 "Applying 50XXB2 workarounds to unknown rev\n");
2594 hp_flags |= MV_HP_ERRATA_50XXB2;
2595 break;
2596 }
2597 break;
2598
bca1c4eb
JG
2599 case chip_504x:
2600 case chip_508x:
47c2b677 2601 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2602 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2603
44c10138 2604 switch (pdev->revision) {
47c2b677
JG
2605 case 0x0:
2606 hp_flags |= MV_HP_ERRATA_50XXB0;
2607 break;
2608 case 0x3:
2609 hp_flags |= MV_HP_ERRATA_50XXB2;
2610 break;
2611 default:
2612 dev_printk(KERN_WARNING, &pdev->dev,
2613 "Applying B2 workarounds to unknown rev\n");
2614 hp_flags |= MV_HP_ERRATA_50XXB2;
2615 break;
bca1c4eb
JG
2616 }
2617 break;
2618
2619 case chip_604x:
2620 case chip_608x:
47c2b677 2621 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2622 hp_flags |= MV_HP_GEN_II;
47c2b677 2623
44c10138 2624 switch (pdev->revision) {
47c2b677
JG
2625 case 0x7:
2626 hp_flags |= MV_HP_ERRATA_60X1B2;
2627 break;
2628 case 0x9:
2629 hp_flags |= MV_HP_ERRATA_60X1C0;
bca1c4eb
JG
2630 break;
2631 default:
2632 dev_printk(KERN_WARNING, &pdev->dev,
47c2b677
JG
2633 "Applying B2 workarounds to unknown rev\n");
2634 hp_flags |= MV_HP_ERRATA_60X1B2;
bca1c4eb
JG
2635 break;
2636 }
2637 break;
2638
e4e7b892 2639 case chip_7042:
02a121da 2640 hp_flags |= MV_HP_PCIE;
306b30f7
ML
2641 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2642 (pdev->device == 0x2300 || pdev->device == 0x2310))
2643 {
4e520033
ML
2644 /*
2645 * Highpoint RocketRAID PCIe 23xx series cards:
2646 *
2647 * Unconfigured drives are treated as "Legacy"
2648 * by the BIOS, and it overwrites sector 8 with
2649 * a "Lgcy" metadata block prior to Linux boot.
2650 *
2651 * Configured drives (RAID or JBOD) leave sector 8
2652 * alone, but instead overwrite a high numbered
2653 * sector for the RAID metadata. This sector can
2654 * be determined exactly, by truncating the physical
2655 * drive capacity to a nice even GB value.
2656 *
2657 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2658 *
2659 * Warn the user, lest they think we're just buggy.
2660 */
2661 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2662 " BIOS CORRUPTS DATA on all attached drives,"
2663 " regardless of if/how they are configured."
2664 " BEWARE!\n");
2665 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2666 " use sectors 8-9 on \"Legacy\" drives,"
2667 " and avoid the final two gigabytes on"
2668 " all RocketRAID BIOS initialized drives.\n");
306b30f7 2669 }
e4e7b892
JG
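		/* chip_7042 intentionally falls through to the chip_6042 handling below */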
2670 case chip_6042:
2671 hpriv->ops = &mv6xxx_ops;
e4e7b892
JG
2672 hp_flags |= MV_HP_GEN_IIE;
2673
44c10138 2674 switch (pdev->revision) {
e4e7b892
JG
2675 case 0x0:
2676 hp_flags |= MV_HP_ERRATA_XX42A0;
2677 break;
2678 case 0x1:
2679 hp_flags |= MV_HP_ERRATA_60X1C0;
2680 break;
2681 default:
2682 dev_printk(KERN_WARNING, &pdev->dev,
2683 "Applying 60X1C0 workarounds to unknown rev\n");
2684 hp_flags |= MV_HP_ERRATA_60X1C0;
2685 break;
2686 }
2687 break;
f351b2d6
SB
2688 case chip_soc:
2689 hpriv->ops = &mv_soc_ops;
2690 hp_flags |= MV_HP_ERRATA_60X1C0;
2691 break;
e4e7b892 2692
bca1c4eb 2693 default:
f351b2d6 2694 dev_printk(KERN_ERR, host->dev,
5796d1c4 2695 "BUG: invalid board index %u\n", board_idx);
bca1c4eb
JG
2696 return 1;
2697 }
2698
2699 hpriv->hp_flags = hp_flags;
02a121da
ML
2700 if (hp_flags & MV_HP_PCIE) {
2701 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2702 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2703 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2704 } else {
2705 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2706 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2707 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2708 }
bca1c4eb
JG
2709
2710 return 0;
2711}
2712
05b308e1 2713/**
47c2b677 2714 * mv_init_host - Perform some early initialization of the host.
4447d351
TH
2715 * @host: ATA host to initialize
2716 * @board_idx: controller index
05b308e1
BR
2717 *
2718 * If possible, do an early global reset of the host. Then do
2719 * our port init and clear/unmask all/relevant host interrupts.
2720 *
2721 * LOCKING:
2722 * Inherited from caller.
2723 */
4447d351 2724static int mv_init_host(struct ata_host *host, unsigned int board_idx)
20f733e7
BR
2725{
2726 int rc = 0, n_hc, port, hc;
4447d351 2727 struct mv_host_priv *hpriv = host->private_data;
f351b2d6 2728 void __iomem *mmio = hpriv->base;
47c2b677 2729
4447d351 2730 rc = mv_chip_id(host, board_idx);
bca1c4eb 2731 if (rc)
f351b2d6
SB
2732 goto done;
2733
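	/* PCI hosts and SoC hosts expose the main IRQ cause/mask registers at different offsets */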
2734 if (HAS_PCI(host)) {
2735 hpriv->main_cause_reg_addr = hpriv->base +
2736 HC_MAIN_IRQ_CAUSE_OFS;
2737 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2738 } else {
2739 hpriv->main_cause_reg_addr = hpriv->base +
2740 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2741 hpriv->main_mask_reg_addr = hpriv->base +
2742 HC_SOC_MAIN_IRQ_MASK_OFS;
2743 }
2744 /* global interrupt mask */
2745 writel(0, hpriv->main_mask_reg_addr);
bca1c4eb 2746
4447d351 2747 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 2748
4447d351 2749 for (port = 0; port < host->n_ports; port++)
47c2b677 2750 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 2751
c9d39130 2752 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 2753 if (rc)
20f733e7 2754 goto done;
20f733e7 2755
522479fb 2756 hpriv->ops->reset_flash(hpriv, mmio);
7bb3c529 2757 hpriv->ops->reset_bus(host, mmio);
47c2b677 2758 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 2759
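	/* per-port PHY setup: Gen-II parts get the gen2i speed enable before the errata fixups run */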
4447d351 2760 for (port = 0; port < host->n_ports; port++) {
ee9ccdf7 2761 if (IS_GEN_II(hpriv)) {
c9d39130
JG
2762 void __iomem *port_mmio = mv_port_base(mmio, port);
2763
2a47ce06 2764 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2765 ifctl |= (1 << 7); /* enable gen2i speed */
2766 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2a47ce06
JG
2767 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2768 }
2769
c9d39130 2770 hpriv->ops->phy_errata(hpriv, mmio, port);
2a47ce06
JG
2771 }
2772
4447d351 2773 for (port = 0; port < host->n_ports; port++) {
cbcdd875 2774 struct ata_port *ap = host->ports[port];
2a47ce06 2775 void __iomem *port_mmio = mv_port_base(mmio, port);
cbcdd875
TH
2776
2777 mv_port_init(&ap->ioaddr, port_mmio);
2778
7bb3c529 2779#ifdef CONFIG_PCI
f351b2d6
SB
2780 if (HAS_PCI(host)) {
2781 unsigned int offset = port_mmio - mmio;
2782 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2783 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2784 }
7bb3c529 2785#endif
20f733e7
BR
2786 }
2787
2788 for (hc = 0; hc < n_hc; hc++) {
31961943
BR
2789 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2790
2791 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2792 "(before clear)=0x%08x\n", hc,
2793 readl(hc_mmio + HC_CFG_OFS),
2794 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2795
2796 /* Clear any currently outstanding hc interrupt conditions */
2797 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
2798 }
2799
f351b2d6
SB
2800 if (HAS_PCI(host)) {
2801 /* Clear any currently outstanding host interrupt conditions */
2802 writelfl(0, mmio + hpriv->irq_cause_ofs);
31961943 2803
f351b2d6
SB
2804 /* and unmask interrupt generation for host regs */
2805 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2806 if (IS_GEN_I(hpriv))
2807 writelfl(~HC_MAIN_MASKED_IRQS_5,
2808 hpriv->main_mask_reg_addr);
2809 else
2810 writelfl(~HC_MAIN_MASKED_IRQS,
2811 hpriv->main_mask_reg_addr);
2812
2813 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2814 "PCI int cause/mask=0x%08x/0x%08x\n",
2815 readl(hpriv->main_cause_reg_addr),
2816 readl(hpriv->main_mask_reg_addr),
2817 readl(mmio + hpriv->irq_cause_ofs),
2818 readl(mmio + hpriv->irq_mask_ofs));
2819 } else {
2820 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2821 hpriv->main_mask_reg_addr);
2822 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2823 readl(hpriv->main_cause_reg_addr),
2824 readl(hpriv->main_mask_reg_addr));
2825 }
2826done:
2827 return rc;
2828}
fb621e2f 2829
fbf14e2f
BB
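/* managed (devres) DMA pools for the command request, response and scatter/gather
 * tables; they are freed automatically when the host goes away
 */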
2830static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2831{
2832 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2833 MV_CRQB_Q_SZ, 0);
2834 if (!hpriv->crqb_pool)
2835 return -ENOMEM;
2836
2837 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2838 MV_CRPB_Q_SZ, 0);
2839 if (!hpriv->crpb_pool)
2840 return -ENOMEM;
2841
2842 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2843 MV_SG_TBL_SZ, 0);
2844 if (!hpriv->sg_tbl_pool)
2845 return -ENOMEM;
2846
2847 return 0;
2848}
2849
f351b2d6
SB
2850/**
 2851 * mv_platform_probe - handle a positive probe of an SoC Marvell
2852 * host
2853 * @pdev: platform device found
2854 *
2855 * LOCKING:
2856 * Inherited from caller.
2857 */
2858static int mv_platform_probe(struct platform_device *pdev)
2859{
2860 static int printed_version;
2861 const struct mv_sata_platform_data *mv_platform_data;
2862 const struct ata_port_info *ppi[] =
2863 { &mv_port_info[chip_soc], NULL };
2864 struct ata_host *host;
2865 struct mv_host_priv *hpriv;
2866 struct resource *res;
2867 int n_ports, rc;
20f733e7 2868
f351b2d6
SB
2869 if (!printed_version++)
2870 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
bca1c4eb 2871
f351b2d6
SB
2872 /*
2873 * Simple resource validation ..
2874 */
2875 if (unlikely(pdev->num_resources != 2)) {
2876 dev_err(&pdev->dev, "invalid number of resources\n");
2877 return -EINVAL;
2878 }
2879
2880 /*
2881 * Get the register base first
2882 */
2883 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2884 if (res == NULL)
2885 return -EINVAL;
2886
2887 /* allocate host */
2888 mv_platform_data = pdev->dev.platform_data;
2889 n_ports = mv_platform_data->n_ports;
2890
2891 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2892 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2893
2894 if (!host || !hpriv)
2895 return -ENOMEM;
2896 host->private_data = hpriv;
2897 hpriv->n_ports = n_ports;
2898
2899 host->iomap = NULL;
f1cb0ea1
SB
2900 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2901 res->end - res->start + 1);
f351b2d6
SB
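	/* the platform resource maps the SATAHC0 register block, while the driver uses chip-relative offsets, so rebase the mapping */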
2902 hpriv->base -= MV_SATAHC0_REG_BASE;
2903
fbf14e2f
BB
2904 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2905 if (rc)
2906 return rc;
2907
f351b2d6
SB
2908 /* initialize adapter */
2909 rc = mv_init_host(host, chip_soc);
2910 if (rc)
2911 return rc;
2912
2913 dev_printk(KERN_INFO, &pdev->dev,
2914 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2915 host->n_ports);
2916
2917 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2918 IRQF_SHARED, &mv6_sht);
2919}
2920
2921/*
2922 *
2923 * mv_platform_remove - unplug a platform interface
2924 * @pdev: platform device
2925 *
2926 * A platform bus SATA device has been unplugged. Perform the needed
2927 * cleanup. Also called on module unload for any active devices.
2928 */
2929static int __devexit mv_platform_remove(struct platform_device *pdev)
2930{
2931 struct device *dev = &pdev->dev;
2932 struct ata_host *host = dev_get_drvdata(dev);
f351b2d6
SB
2933
2934 ata_host_detach(host);
f351b2d6 2935 return 0;
20f733e7
BR
2936}
2937
f351b2d6
SB
2938static struct platform_driver mv_platform_driver = {
2939 .probe = mv_platform_probe,
2940 .remove = __devexit_p(mv_platform_remove),
2941 .driver = {
2942 .name = DRV_NAME,
2943 .owner = THIS_MODULE,
2944 },
2945};
2946
2947
7bb3c529 2948#ifdef CONFIG_PCI
f351b2d6
SB
2949static int mv_pci_init_one(struct pci_dev *pdev,
2950 const struct pci_device_id *ent);
2951
7bb3c529
SB
2952
2953static struct pci_driver mv_pci_driver = {
2954 .name = DRV_NAME,
2955 .id_table = mv_pci_tbl,
f351b2d6 2956 .probe = mv_pci_init_one,
7bb3c529
SB
2957 .remove = ata_pci_remove_one,
2958};
2959
2960/*
2961 * module options
2962 */
2963static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2964
2965
2966/* move to PCI layer or libata core? */
2967static int pci_go_64(struct pci_dev *pdev)
2968{
2969 int rc;
2970
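	/* prefer 64-bit DMA; fall back to a 32-bit consistent mask (or 32-bit for both) if that cannot be set */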
2971 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2972 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2973 if (rc) {
2974 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2975 if (rc) {
2976 dev_printk(KERN_ERR, &pdev->dev,
2977 "64-bit DMA enable failed\n");
2978 return rc;
2979 }
2980 }
2981 } else {
2982 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2983 if (rc) {
2984 dev_printk(KERN_ERR, &pdev->dev,
2985 "32-bit DMA enable failed\n");
2986 return rc;
2987 }
2988 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2989 if (rc) {
2990 dev_printk(KERN_ERR, &pdev->dev,
2991 "32-bit consistent DMA enable failed\n");
2992 return rc;
2993 }
2994 }
2995
2996 return rc;
2997}
2998
05b308e1
BR
2999/**
3000 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 3001 * @host: ATA host to print info about
05b308e1
BR
3002 *
3003 * FIXME: complete this.
3004 *
3005 * LOCKING:
3006 * Inherited from caller.
3007 */
4447d351 3008static void mv_print_info(struct ata_host *host)
31961943 3009{
4447d351
TH
3010 struct pci_dev *pdev = to_pci_dev(host->dev);
3011 struct mv_host_priv *hpriv = host->private_data;
44c10138 3012 u8 scc;
c1e4fe71 3013 const char *scc_s, *gen;
31961943
BR
3014
3015 /* Use this to determine the HW stepping of the chip so we know
 3016	 * what errata to work around
3017 */
31961943
BR
3018 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3019 if (scc == 0)
3020 scc_s = "SCSI";
3021 else if (scc == 0x01)
3022 scc_s = "RAID";
3023 else
c1e4fe71
JG
3024 scc_s = "?";
3025
3026 if (IS_GEN_I(hpriv))
3027 gen = "I";
3028 else if (IS_GEN_II(hpriv))
3029 gen = "II";
3030 else if (IS_GEN_IIE(hpriv))
3031 gen = "IIE";
3032 else
3033 gen = "?";
31961943 3034
a9524a76 3035 dev_printk(KERN_INFO, &pdev->dev,
c1e4fe71
JG
3036 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3037 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
31961943
BR
3038 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3039}
3040
05b308e1 3041/**
f351b2d6 3042 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
05b308e1
BR
3043 * @pdev: PCI device found
3044 * @ent: PCI device ID entry for the matched host
3045 *
3046 * LOCKING:
3047 * Inherited from caller.
3048 */
f351b2d6
SB
3049static int mv_pci_init_one(struct pci_dev *pdev,
3050 const struct pci_device_id *ent)
20f733e7 3051{
2dcb407e 3052 static int printed_version;
20f733e7 3053 unsigned int board_idx = (unsigned int)ent->driver_data;
4447d351
TH
3054 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3055 struct ata_host *host;
3056 struct mv_host_priv *hpriv;
3057 int n_ports, rc;
20f733e7 3058
a9524a76
JG
3059 if (!printed_version++)
3060 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
20f733e7 3061
4447d351
TH
3062 /* allocate host */
3063 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3064
3065 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3066 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3067 if (!host || !hpriv)
3068 return -ENOMEM;
3069 host->private_data = hpriv;
f351b2d6 3070 hpriv->n_ports = n_ports;
4447d351
TH
3071
3072 /* acquire resources */
24dc5f33
TH
3073 rc = pcim_enable_device(pdev);
3074 if (rc)
20f733e7 3075 return rc;
20f733e7 3076
0d5ff566
TH
3077 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3078 if (rc == -EBUSY)
24dc5f33 3079 pcim_pin_device(pdev);
0d5ff566 3080 if (rc)
24dc5f33 3081 return rc;
4447d351 3082 host->iomap = pcim_iomap_table(pdev);
f351b2d6 3083 hpriv->base = host->iomap[MV_PRIMARY_BAR];
20f733e7 3084
d88184fb
JG
3085 rc = pci_go_64(pdev);
3086 if (rc)
3087 return rc;
3088
da2fa9ba
ML
3089 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3090 if (rc)
3091 return rc;
3092
20f733e7 3093 /* initialize adapter */
4447d351 3094 rc = mv_init_host(host, board_idx);
24dc5f33
TH
3095 if (rc)
3096 return rc;
20f733e7 3097
31961943 3098 /* Enable interrupts */
6a59dcf8 3099 if (msi && pci_enable_msi(pdev))
31961943 3100 pci_intx(pdev, 1);
20f733e7 3101
31961943 3102 mv_dump_pci_cfg(pdev, 0x68);
4447d351 3103 mv_print_info(host);
20f733e7 3104
4447d351 3105 pci_set_master(pdev);
ea8b4db9 3106 pci_try_set_mwi(pdev);
4447d351 3107 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
c5d3e45a 3108 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
20f733e7 3109}
7bb3c529 3110#endif
20f733e7 3111
f351b2d6
SB
3112static int mv_platform_probe(struct platform_device *pdev);
3113static int __devexit mv_platform_remove(struct platform_device *pdev);
3114
20f733e7
BR
3115static int __init mv_init(void)
3116{
7bb3c529
SB
3117 int rc = -ENODEV;
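	/* register the PCI driver first (when configured), then the platform driver; unwind the PCI registration if the platform one fails */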
3118#ifdef CONFIG_PCI
3119 rc = pci_register_driver(&mv_pci_driver);
f351b2d6
SB
3120 if (rc < 0)
3121 return rc;
3122#endif
3123 rc = platform_driver_register(&mv_platform_driver);
3124
3125#ifdef CONFIG_PCI
3126 if (rc < 0)
3127 pci_unregister_driver(&mv_pci_driver);
7bb3c529
SB
3128#endif
3129 return rc;
20f733e7
BR
3130}
3131
3132static void __exit mv_exit(void)
3133{
7bb3c529 3134#ifdef CONFIG_PCI
20f733e7 3135 pci_unregister_driver(&mv_pci_driver);
7bb3c529 3136#endif
f351b2d6 3137 platform_driver_unregister(&mv_platform_driver);
20f733e7
BR
3138}
3139
3140MODULE_AUTHOR("Brett Russ");
3141MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3142MODULE_LICENSE("GPL");
3143MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3144MODULE_VERSION(DRV_VERSION);
2e7e1214 3145MODULE_ALIAS("platform:sata_mv");
20f733e7 3146
7bb3c529 3147#ifdef CONFIG_PCI
ddef9bb3
JG
3148module_param(msi, int, 0444);
3149MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
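/* usage example: modprobe sata_mv msi=1 */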
7bb3c529 3150#endif
ddef9bb3 3151
20f733e7
BR
3152module_init(mv_init);
3153module_exit(mv_exit);