/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};
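
/*
 * Note: the unnamed brace block inside each entry above initializes the
 * chip's register-offset sub-structure (the struct ipr_interrupt_offsets
 * member declared in ipr.h) positionally rather than by designator.
 */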

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
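
/*
 * Example usage (hypothetical values): the parameters above can be set
 * at module load time, e.g.
 *
 *	modprobe ipr max_speed=2 number_of_msix=8 fastfail=1
 *
 * Those declared with S_IRUGO | S_IWUSR (fastfail, debug, fast_reboot)
 * can also be changed at runtime via /sys/module/ipr/parameters/.
 */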

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};
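
/*
 * Each entry above pairs an IOASC with its logging controls and message
 * text; the field layout follows struct ipr_error_table_t in ipr.h
 * (assumed here to be: ioasc, log_ioasa, log_hcam, error string).
 */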

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

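/*
 * Note: in the compare strings above, an 'X' marks a product-ID byte
 * that must match and '*' a byte that is ignored; a matching entry caps
 * the backplane's maximum bus speed (consumed by the SES lookup code
 * later in this file, e.g. ipr_get_max_scsi_speed()).
 */
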
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

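	/*
	 * atomic_add_return() hands each caller a private slot and
	 * IPR_TRACE_INDEX_MASK wraps the index, so the trace buffer
	 * behaves as a lock-free ring shared by all queues.
	 */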
	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}
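
/*
 * Note: driver-initiated commands are always drawn from HRRQ 0
 * (IPR_INIT_HRRQ); the remaining queues carry normal I/O (see
 * ipr_get_hrrq_index() below).
 */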

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
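	/* Read back, likely to flush the posted MMIO writes above */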
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
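
/*
 * Note: the __ipr_*_eh_done() variants above run with the owning HRRQ's
 * _lock already held; the unprefixed wrappers acquire that lock first.
 */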

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
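
/*
 * Note: on SIS64 the IOARCB is aligned so the low-order bits of its bus
 * address are always zero; the code above reuses those spare bits of
 * the value written to IOARRIN to tell the adapter how large an IOARCB
 * to fetch (256 vs. 512 bytes).
 */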

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct timer_list *),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
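
/*
 * Note: callers of ipr_send_blocking_cmd() must hold host_lock; it is
 * dropped across wait_for_completion() so the command can complete.
 */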

static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

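	/*
	 * HRRQ 0 (IPR_INIT_HRRQ) is reserved for driver-internal commands,
	 * so normal I/O round-robins across queues 1..hrrq_num-1 using an
	 * atomic counter.
	 */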
	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
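
/*
 * Note: an HCAM behaves as a long-lived read: the adapter completes it
 * only when an asynchronous event occurs, and the done routines
 * (ipr_process_ccn()/ipr_process_error()) re-issue it, so one HCAM of
 * each type is normally kept outstanding.
 */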

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}
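
/*
 * Note: on SIS64 the bus/target presented to the SCSI mid-layer are
 * synthesized: targets on the virtual array/vset/SCSI buses are handed
 * out from the array_ids/vset_ids/target_ids bitmaps above, and devices
 * sharing a dev_id reuse the same target number.
 */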

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}
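
/*
 * Note: res_path is a 0xff-terminated byte array, formatted above as
 * dash-separated hex pairs ("XX-XX-..."); the (i * 3) bound accounts
 * for the three characters each element contributes.
 */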

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (p - buffer));
	return buffer;
}

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * 			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *      none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}
1435
1436/**
1437 * ipr_handle_config_change - Handle a config change from the adapter
1438 * @ioa_cfg: ioa config struct
1439 * @hostrcb: hostrcb
1440 *
1441 * Return value:
1442 * none
1443 **/
1444static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
3e7ebdfa 1445 struct ipr_hostrcb *hostrcb)
1da177e4
LT
1446{
1447 struct ipr_resource_entry *res = NULL;
3e7ebdfa
WB
1448 struct ipr_config_table_entry_wrapper cfgtew;
1449 __be32 cc_res_handle;
1450
1da177e4
LT
1451 u32 is_ndn = 1;
1452
3e7ebdfa
WB
1453 if (ioa_cfg->sis64) {
1454 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1455 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1456 } else {
1457 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1458 cc_res_handle = cfgtew.u.cfgte->res_handle;
1459 }
1da177e4
LT
1460
1461 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 1462 if (res->res_handle == cc_res_handle) {
1da177e4
LT
1463 is_ndn = 0;
1464 break;
1465 }
1466 }
1467
1468 if (is_ndn) {
1469 if (list_empty(&ioa_cfg->free_res_q)) {
1470 ipr_send_hcam(ioa_cfg,
1471 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1472 hostrcb);
1473 return;
1474 }
1475
1476 res = list_entry(ioa_cfg->free_res_q.next,
1477 struct ipr_resource_entry, queue);
1478
1479 list_del(&res->queue);
3e7ebdfa 1480 ipr_init_res_entry(res, &cfgtew);
1da177e4
LT
1481 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1482 }
1483
3e7ebdfa 1484 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
1485
1486 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1487 if (res->sdev) {
1da177e4 1488 res->del_from_ml = 1;
3e7ebdfa 1489 res->res_handle = IPR_INVALID_RES_HANDLE;
f688f96d 1490 schedule_work(&ioa_cfg->work_q);
3e7ebdfa
WB
1491 } else {
1492 ipr_clear_res_target(res);
1da177e4 1493 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3e7ebdfa 1494 }
5767a1c4 1495 } else if (!res->sdev || res->del_from_ml) {
1da177e4 1496 res->add_to_ml = 1;
f688f96d 1497 schedule_work(&ioa_cfg->work_q);
1da177e4
LT
1498 }
1499
1500 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1501}
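/*
 * Flow summary for the handler above: the res_handle lookup decides
 * between "existing resource" and "new device notification" (is_ndn).
 * New devices consume an entry from free_res_q; removals either flag
 * the sdev for mid-layer removal (del_from_ml) or return the entry to
 * free_res_q immediately. In every path the HCAM buffer is re-posted
 * to the adapter so further notifications can arrive.
 */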
1502
1503/**
1504 * ipr_process_ccn - Op done function for a CCN.
1505 * @ipr_cmd: ipr command struct
1506 *
1507 * This function is the op done function for a configuration
1508 * change notification host controlled async from the adapter.
1509 *
1510 * Return value:
1511 * none
1512 **/
1513static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1514{
1515 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1516 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 1517 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4 1518
afc3f83c 1519 list_del_init(&hostrcb->queue);
05a6538a 1520 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
1521
1522 if (ioasc) {
4fdd7c7a
BK
1523 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1524 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1da177e4
LT
1525 dev_err(&ioa_cfg->pdev->dev,
1526 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1527
1528 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1529 } else {
1530 ipr_handle_config_change(ioa_cfg, hostrcb);
1531 }
1532}
1533
8cf093e2
BK
1534/**
1535 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1536 * @i: index into buffer
1537 * @buf: string to modify
1538 *
1539 * This function will strip all trailing whitespace, pad the end
1540 * of the string with a single space, and NULL terminate the string.
1541 *
1542 * Return value:
1543 * new length of string
1544 **/
1545static int strip_and_pad_whitespace(int i, char *buf)
1546{
1547 while (i && buf[i] == ' ')
1548 i--;
1549 buf[i+1] = ' ';
1550 buf[i+2] = '\0';
1551 return i + 2;
1552}
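/*
 * Worked example (data hypothetical): with buf = "IBM     " and
 * i = 7 (the last index of the field), the loop backs up over the
 * trailing blanks to the 'M', then writes " \0" after it:
 *
 *	before: "IBM     "	i = 7
 *	after:  "IBM \0"	return value = 4
 *
 * so the caller can append the next field at the returned offset.
 */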
1553
1554/**
1555 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1556 * @prefix: string to print at start of printk
1557 * @hostrcb: hostrcb pointer
1558 * @vpd: vendor/product id/sn struct
1559 *
1560 * Return value:
1561 * none
1562 **/
1563static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1564 struct ipr_vpd *vpd)
1565{
1566 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1567 int i = 0;
1568
1569 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1570 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1571
1572 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1573 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1574
1575 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1576 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1577
1578 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1579}
1580
1da177e4
LT
1581/**
1582 * ipr_log_vpd - Log the passed VPD to the error log.
cfc32139 1583 * @vpd: vendor/product id/sn struct
1da177e4
LT
1584 *
1585 * Return value:
1586 * none
1587 **/
cfc32139 1588static void ipr_log_vpd(struct ipr_vpd *vpd)
1da177e4
LT
1589{
1590 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1591 + IPR_SERIAL_NUM_LEN];
1592
cfc32139 1593 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1594 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1da177e4
LT
1595 IPR_PROD_ID_LEN);
1596 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1597 ipr_err("Vendor/Product ID: %s\n", buffer);
1598
cfc32139 1599 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1da177e4
LT
1600 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1601 ipr_err(" Serial Number: %s\n", buffer);
1602}
1603
8cf093e2
BK
1604/**
1605 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1606 * @prefix: string to print at start of printk
1607 * @hostrcb: hostrcb pointer
1608 * @vpd: vendor/product id/sn/wwn struct
1609 *
1610 * Return value:
1611 * none
1612 **/
1613static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1614 struct ipr_ext_vpd *vpd)
1615{
1616 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1617 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1618 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1619}
1620
ee0f05b8 1621/**
1622 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1623 * @vpd: vendor/product id/sn/wwn struct
1624 *
1625 * Return value:
1626 * none
1627 **/
1628static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1629{
1630 ipr_log_vpd(&vpd->vpd);
1631 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1632 be32_to_cpu(vpd->wwid[1]));
1633}
1634
1635/**
1636 * ipr_log_enhanced_cache_error - Log a cache error.
1637 * @ioa_cfg: ioa config struct
1638 * @hostrcb: hostrcb struct
1639 *
1640 * Return value:
1641 * none
1642 **/
1643static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1644 struct ipr_hostrcb *hostrcb)
1645{
4565e370
WB
1646 struct ipr_hostrcb_type_12_error *error;
1647
1648 if (ioa_cfg->sis64)
1649 error = &hostrcb->hcam.u.error64.u.type_12_error;
1650 else
1651 error = &hostrcb->hcam.u.error.u.type_12_error;
ee0f05b8 1652
1653 ipr_err("-----Current Configuration-----\n");
1654 ipr_err("Cache Directory Card Information:\n");
1655 ipr_log_ext_vpd(&error->ioa_vpd);
1656 ipr_err("Adapter Card Information:\n");
1657 ipr_log_ext_vpd(&error->cfc_vpd);
1658
1659 ipr_err("-----Expected Configuration-----\n");
1660 ipr_err("Cache Directory Card Information:\n");
1661 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1662 ipr_err("Adapter Card Information:\n");
1663 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1664
1665 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1666 be32_to_cpu(error->ioa_data[0]),
1667 be32_to_cpu(error->ioa_data[1]),
1668 be32_to_cpu(error->ioa_data[2]));
1669}
1670
1da177e4
LT
1671/**
1672 * ipr_log_cache_error - Log a cache error.
1673 * @ioa_cfg: ioa config struct
1674 * @hostrcb: hostrcb struct
1675 *
1676 * Return value:
1677 * none
1678 **/
1679static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1680 struct ipr_hostrcb *hostrcb)
1681{
1682 struct ipr_hostrcb_type_02_error *error =
1683 &hostrcb->hcam.u.error.u.type_02_error;
1684
1685 ipr_err("-----Current Configuration-----\n");
1686 ipr_err("Cache Directory Card Information:\n");
cfc32139 1687 ipr_log_vpd(&error->ioa_vpd);
1da177e4 1688 ipr_err("Adapter Card Information:\n");
cfc32139 1689 ipr_log_vpd(&error->cfc_vpd);
1da177e4
LT
1690
1691 ipr_err("-----Expected Configuration-----\n");
1692 ipr_err("Cache Directory Card Information:\n");
cfc32139 1693 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1da177e4 1694 ipr_err("Adapter Card Information:\n");
cfc32139 1695 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1da177e4
LT
1696
1697 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1698 be32_to_cpu(error->ioa_data[0]),
1699 be32_to_cpu(error->ioa_data[1]),
1700 be32_to_cpu(error->ioa_data[2]));
1701}
1702
ee0f05b8 1703/**
1704 * ipr_log_enhanced_config_error - Log a configuration error.
1705 * @ioa_cfg: ioa config struct
1706 * @hostrcb: hostrcb struct
1707 *
1708 * Return value:
1709 * none
1710 **/
1711static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1712 struct ipr_hostrcb *hostrcb)
1713{
1714 int errors_logged, i;
1715 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1716 struct ipr_hostrcb_type_13_error *error;
1717
1718 error = &hostrcb->hcam.u.error.u.type_13_error;
1719 errors_logged = be32_to_cpu(error->errors_logged);
1720
1721 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1722 be32_to_cpu(error->errors_detected), errors_logged);
1723
1724 dev_entry = error->dev;
1725
1726 for (i = 0; i < errors_logged; i++, dev_entry++) {
1727 ipr_err_separator;
1728
1729 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1730 ipr_log_ext_vpd(&dev_entry->vpd);
1731
1732 ipr_err("-----New Device Information-----\n");
1733 ipr_log_ext_vpd(&dev_entry->new_vpd);
1734
1735 ipr_err("Cache Directory Card Information:\n");
1736 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1737
1738 ipr_err("Adapter Card Information:\n");
1739 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1740 }
1741}
1742
4565e370
WB
1743/**
 1744 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1745 * @ioa_cfg: ioa config struct
1746 * @hostrcb: hostrcb struct
1747 *
1748 * Return value:
1749 * none
1750 **/
1751static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1752 struct ipr_hostrcb *hostrcb)
1753{
1754 int errors_logged, i;
1755 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1756 struct ipr_hostrcb_type_23_error *error;
1757 char buffer[IPR_MAX_RES_PATH_LENGTH];
1758
1759 error = &hostrcb->hcam.u.error64.u.type_23_error;
1760 errors_logged = be32_to_cpu(error->errors_logged);
1761
1762 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1763 be32_to_cpu(error->errors_detected), errors_logged);
1764
1765 dev_entry = error->dev;
1766
1767 for (i = 0; i < errors_logged; i++, dev_entry++) {
1768 ipr_err_separator;
1769
1770 ipr_err("Device %d : %s", i + 1,
b3b3b407
BK
1771 __ipr_format_res_path(dev_entry->res_path,
1772 buffer, sizeof(buffer)));
4565e370
WB
1773 ipr_log_ext_vpd(&dev_entry->vpd);
1774
1775 ipr_err("-----New Device Information-----\n");
1776 ipr_log_ext_vpd(&dev_entry->new_vpd);
1777
1778 ipr_err("Cache Directory Card Information:\n");
1779 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1780
1781 ipr_err("Adapter Card Information:\n");
1782 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1783 }
1784}
1785
1da177e4
LT
1786/**
1787 * ipr_log_config_error - Log a configuration error.
1788 * @ioa_cfg: ioa config struct
1789 * @hostrcb: hostrcb struct
1790 *
1791 * Return value:
1792 * none
1793 **/
1794static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1795 struct ipr_hostrcb *hostrcb)
1796{
1797 int errors_logged, i;
1798 struct ipr_hostrcb_device_data_entry *dev_entry;
1799 struct ipr_hostrcb_type_03_error *error;
1800
1801 error = &hostrcb->hcam.u.error.u.type_03_error;
1802 errors_logged = be32_to_cpu(error->errors_logged);
1803
1804 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1805 be32_to_cpu(error->errors_detected), errors_logged);
1806
cfc32139 1807 dev_entry = error->dev;
1da177e4
LT
1808
1809 for (i = 0; i < errors_logged; i++, dev_entry++) {
1810 ipr_err_separator;
1811
fa15b1f6 1812 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1813 ipr_log_vpd(&dev_entry->vpd);
1da177e4
LT
1814
1815 ipr_err("-----New Device Information-----\n");
cfc32139 1816 ipr_log_vpd(&dev_entry->new_vpd);
1da177e4
LT
1817
1818 ipr_err("Cache Directory Card Information:\n");
cfc32139 1819 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1da177e4
LT
1820
1821 ipr_err("Adapter Card Information:\n");
cfc32139 1822 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1da177e4
LT
1823
1824 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1825 be32_to_cpu(dev_entry->ioa_data[0]),
1826 be32_to_cpu(dev_entry->ioa_data[1]),
1827 be32_to_cpu(dev_entry->ioa_data[2]),
1828 be32_to_cpu(dev_entry->ioa_data[3]),
1829 be32_to_cpu(dev_entry->ioa_data[4]));
1830 }
1831}
1832
ee0f05b8 1833/**
1834 * ipr_log_enhanced_array_error - Log an array configuration error.
1835 * @ioa_cfg: ioa config struct
1836 * @hostrcb: hostrcb struct
1837 *
1838 * Return value:
1839 * none
1840 **/
1841static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1842 struct ipr_hostrcb *hostrcb)
1843{
1844 int i, num_entries;
1845 struct ipr_hostrcb_type_14_error *error;
1846 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1847 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1848
1849 error = &hostrcb->hcam.u.error.u.type_14_error;
1850
1851 ipr_err_separator;
1852
1853 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1854 error->protection_level,
1855 ioa_cfg->host->host_no,
1856 error->last_func_vset_res_addr.bus,
1857 error->last_func_vset_res_addr.target,
1858 error->last_func_vset_res_addr.lun);
1859
1860 ipr_err_separator;
1861
1862 array_entry = error->array_member;
1863 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
7262026f 1864 ARRAY_SIZE(error->array_member));
ee0f05b8 1865
1866 for (i = 0; i < num_entries; i++, array_entry++) {
1867 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1868 continue;
1869
1870 if (be32_to_cpu(error->exposed_mode_adn) == i)
1871 ipr_err("Exposed Array Member %d:\n", i);
1872 else
1873 ipr_err("Array Member %d:\n", i);
1874
1875 ipr_log_ext_vpd(&array_entry->vpd);
1876 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1877 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1878 "Expected Location");
1879
1880 ipr_err_separator;
1881 }
1882}
1883
1da177e4
LT
1884/**
1885 * ipr_log_array_error - Log an array configuration error.
1886 * @ioa_cfg: ioa config struct
1887 * @hostrcb: hostrcb struct
1888 *
1889 * Return value:
1890 * none
1891 **/
1892static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1893 struct ipr_hostrcb *hostrcb)
1894{
1895 int i;
1896 struct ipr_hostrcb_type_04_error *error;
1897 struct ipr_hostrcb_array_data_entry *array_entry;
1898 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1899
1900 error = &hostrcb->hcam.u.error.u.type_04_error;
1901
1902 ipr_err_separator;
1903
1904 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1905 error->protection_level,
1906 ioa_cfg->host->host_no,
1907 error->last_func_vset_res_addr.bus,
1908 error->last_func_vset_res_addr.target,
1909 error->last_func_vset_res_addr.lun);
1910
1911 ipr_err_separator;
1912
1913 array_entry = error->array_member;
1914
1915 for (i = 0; i < 18; i++) {
cfc32139 1916 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1da177e4
LT
1917 continue;
1918
fa15b1f6 1919 if (be32_to_cpu(error->exposed_mode_adn) == i)
1da177e4 1920 ipr_err("Exposed Array Member %d:\n", i);
fa15b1f6 1921 else
1da177e4 1922 ipr_err("Array Member %d:\n", i);
1da177e4 1923
cfc32139 1924 ipr_log_vpd(&array_entry->vpd);
1da177e4 1925
fa15b1f6 1926 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1927 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1928 "Expected Location");
1da177e4
LT
1929
1930 ipr_err_separator;
1931
1932 if (i == 9)
1933 array_entry = error->array_member2;
1934 else
1935 array_entry++;
1936 }
1937}
1938
1939/**
b0df54bb 1940 * ipr_log_hex_data - Log additional hex IOA error data.
ac719aba 1941 * @ioa_cfg: ioa config struct
b0df54bb 1942 * @data: IOA error data
1943 * @len: data length
1da177e4
LT
1944 *
1945 * Return value:
1946 * none
1947 **/
359d96e7 1948static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1da177e4
LT
1949{
1950 int i;
1da177e4 1951
b0df54bb 1952 if (len == 0)
1da177e4
LT
1953 return;
1954
ac719aba
BK
1955 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1956 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1957
b0df54bb 1958 for (i = 0; i < len / 4; i += 4) {
1da177e4 1959 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
b0df54bb 1960 be32_to_cpu(data[i]),
1961 be32_to_cpu(data[i+1]),
1962 be32_to_cpu(data[i+2]),
1963 be32_to_cpu(data[i+3]));
1da177e4
LT
1964 }
1965}
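/*
 * Example output of ipr_log_hex_data() (data hypothetical) -- four
 * big-endian words per row, prefixed with the byte offset of the row:
 *
 *	00000000: 12345678 9ABCDEF0 00000001 00000002
 *	00000010: 0000000A 0000000B 0000000C 0000000D
 *
 * The loop indexes in 32-bit words but steps by four, so i*4 is the
 * byte offset of each printed row.
 */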
1966
ee0f05b8 1967/**
1968 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1969 * @ioa_cfg: ioa config struct
1970 * @hostrcb: hostrcb struct
1971 *
1972 * Return value:
1973 * none
1974 **/
1975static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1976 struct ipr_hostrcb *hostrcb)
1977{
1978 struct ipr_hostrcb_type_17_error *error;
1979
4565e370
WB
1980 if (ioa_cfg->sis64)
1981 error = &hostrcb->hcam.u.error64.u.type_17_error;
1982 else
1983 error = &hostrcb->hcam.u.error.u.type_17_error;
1984
ee0f05b8 1985 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1986 strim(error->failure_reason);
ee0f05b8 1987
8cf093e2
BK
1988 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1989 be32_to_cpu(hostrcb->hcam.u.error.prc));
1990 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1991 ipr_log_hex_data(ioa_cfg, error->data,
ee0f05b8 1992 be32_to_cpu(hostrcb->hcam.length) -
1993 (offsetof(struct ipr_hostrcb_error, u) +
1994 offsetof(struct ipr_hostrcb_type_17_error, data)));
1995}
1996
b0df54bb 1997/**
1998 * ipr_log_dual_ioa_error - Log a dual adapter error.
1999 * @ioa_cfg: ioa config struct
2000 * @hostrcb: hostrcb struct
2001 *
2002 * Return value:
2003 * none
2004 **/
2005static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2006 struct ipr_hostrcb *hostrcb)
2007{
2008 struct ipr_hostrcb_type_07_error *error;
2009
2010 error = &hostrcb->hcam.u.error.u.type_07_error;
2011 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 2012 strim(error->failure_reason);
b0df54bb 2013
8cf093e2
BK
2014 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2015 be32_to_cpu(hostrcb->hcam.u.error.prc));
2016 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 2017 ipr_log_hex_data(ioa_cfg, error->data,
b0df54bb 2018 be32_to_cpu(hostrcb->hcam.length) -
2019 (offsetof(struct ipr_hostrcb_error, u) +
2020 offsetof(struct ipr_hostrcb_type_07_error, data)));
2021}
2022
49dc6a18
BK
2023static const struct {
2024 u8 active;
2025 char *desc;
2026} path_active_desc[] = {
2027 { IPR_PATH_NO_INFO, "Path" },
2028 { IPR_PATH_ACTIVE, "Active path" },
2029 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2030};
2031
2032static const struct {
2033 u8 state;
2034 char *desc;
2035} path_state_desc[] = {
2036 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2037 { IPR_PATH_HEALTHY, "is healthy" },
2038 { IPR_PATH_DEGRADED, "is degraded" },
2039 { IPR_PATH_FAILED, "is failed" }
2040};
2041
2042/**
2043 * ipr_log_fabric_path - Log a fabric path error
2044 * @hostrcb: hostrcb struct
2045 * @fabric: fabric descriptor
2046 *
2047 * Return value:
2048 * none
2049 **/
2050static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2051 struct ipr_hostrcb_fabric_desc *fabric)
2052{
2053 int i, j;
2054 u8 path_state = fabric->path_state;
2055 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2056 u8 state = path_state & IPR_PATH_STATE_MASK;
2057
2058 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2059 if (path_active_desc[i].active != active)
2060 continue;
2061
2062 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2063 if (path_state_desc[j].state != state)
2064 continue;
2065
2066 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2067 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2068 path_active_desc[i].desc, path_state_desc[j].desc,
2069 fabric->ioa_port);
2070 } else if (fabric->cascaded_expander == 0xff) {
2071 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2072 path_active_desc[i].desc, path_state_desc[j].desc,
2073 fabric->ioa_port, fabric->phy);
2074 } else if (fabric->phy == 0xff) {
2075 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2076 path_active_desc[i].desc, path_state_desc[j].desc,
2077 fabric->ioa_port, fabric->cascaded_expander);
2078 } else {
2079 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2080 path_active_desc[i].desc, path_state_desc[j].desc,
2081 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2082 }
2083 return;
2084 }
2085 }
2086
2087 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2088 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2089}
2090
4565e370
WB
2091/**
2092 * ipr_log64_fabric_path - Log a fabric path error
2093 * @hostrcb: hostrcb struct
2094 * @fabric: fabric descriptor
2095 *
2096 * Return value:
2097 * none
2098 **/
2099static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2100 struct ipr_hostrcb64_fabric_desc *fabric)
2101{
2102 int i, j;
2103 u8 path_state = fabric->path_state;
2104 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2105 u8 state = path_state & IPR_PATH_STATE_MASK;
2106 char buffer[IPR_MAX_RES_PATH_LENGTH];
2107
2108 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2109 if (path_active_desc[i].active != active)
2110 continue;
2111
2112 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2113 if (path_state_desc[j].state != state)
2114 continue;
2115
2116 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2117 path_active_desc[i].desc, path_state_desc[j].desc,
b3b3b407
BK
2118 ipr_format_res_path(hostrcb->ioa_cfg,
2119 fabric->res_path,
2120 buffer, sizeof(buffer)));
4565e370
WB
2121 return;
2122 }
2123 }
2124
2125 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
b3b3b407
BK
2126 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2127 buffer, sizeof(buffer)));
4565e370
WB
2128}
2129
49dc6a18
BK
2130static const struct {
2131 u8 type;
2132 char *desc;
2133} path_type_desc[] = {
2134 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2135 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2136 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2137 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2138};
2139
2140static const struct {
2141 u8 status;
2142 char *desc;
2143} path_status_desc[] = {
2144 { IPR_PATH_CFG_NO_PROB, "Functional" },
2145 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2146 { IPR_PATH_CFG_FAILED, "Failed" },
2147 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2148 { IPR_PATH_NOT_DETECTED, "Missing" },
2149 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2150};
2151
2152static const char *link_rate[] = {
2153 "unknown",
2154 "disabled",
2155 "phy reset problem",
2156 "spinup hold",
2157 "port selector",
2158 "unknown",
2159 "unknown",
2160 "unknown",
2161 "1.5Gbps",
2162 "3.0Gbps",
2163 "unknown",
2164 "unknown",
2165 "unknown",
2166 "unknown",
2167 "unknown",
2168 "unknown"
2169};
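/*
 * link_rate[] is indexed as link_rate[cfg->link_rate &
 * IPR_PHY_LINK_RATE_MASK]; assuming the mask is the low nibble, the
 * index always stays inside this 16-entry table, so unrecognized SAS
 * rates land in one of the "unknown" slots instead of overrunning.
 */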
2170
2171/**
2172 * ipr_log_path_elem - Log a fabric path element.
2173 * @hostrcb: hostrcb struct
2174 * @cfg: fabric path element struct
2175 *
2176 * Return value:
2177 * none
2178 **/
2179static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2180 struct ipr_hostrcb_config_element *cfg)
2181{
2182 int i, j;
2183 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2184 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2185
2186 if (type == IPR_PATH_CFG_NOT_EXIST)
2187 return;
2188
2189 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2190 if (path_type_desc[i].type != type)
2191 continue;
2192
2193 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2194 if (path_status_desc[j].status != status)
2195 continue;
2196
2197 if (type == IPR_PATH_CFG_IOA_PORT) {
2198 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2199 path_status_desc[j].desc, path_type_desc[i].desc,
2200 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2201 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2202 } else {
2203 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2204 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2205 path_status_desc[j].desc, path_type_desc[i].desc,
2206 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2207 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2208 } else if (cfg->cascaded_expander == 0xff) {
2209 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2210 "WWN=%08X%08X\n", path_status_desc[j].desc,
2211 path_type_desc[i].desc, cfg->phy,
2212 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2213 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2214 } else if (cfg->phy == 0xff) {
2215 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2216 "WWN=%08X%08X\n", path_status_desc[j].desc,
2217 path_type_desc[i].desc, cfg->cascaded_expander,
2218 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2219 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2220 } else {
2221 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2222 "WWN=%08X%08X\n", path_status_desc[j].desc,
2223 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2224 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2225 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2226 }
2227 }
2228 return;
2229 }
2230 }
2231
2232 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2233 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2234 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2235 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2236}
2237
4565e370
WB
2238/**
2239 * ipr_log64_path_elem - Log a fabric path element.
2240 * @hostrcb: hostrcb struct
2241 * @cfg: fabric path element struct
2242 *
2243 * Return value:
2244 * none
2245 **/
2246static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2247 struct ipr_hostrcb64_config_element *cfg)
2248{
2249 int i, j;
2250 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2251 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2252 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2253 char buffer[IPR_MAX_RES_PATH_LENGTH];
2254
2255 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2256 return;
2257
2258 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2259 if (path_type_desc[i].type != type)
2260 continue;
2261
2262 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2263 if (path_status_desc[j].status != status)
2264 continue;
2265
2266 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2267 path_status_desc[j].desc, path_type_desc[i].desc,
b3b3b407
BK
2268 ipr_format_res_path(hostrcb->ioa_cfg,
2269 cfg->res_path, buffer, sizeof(buffer)),
2270 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2271 be32_to_cpu(cfg->wwid[0]),
2272 be32_to_cpu(cfg->wwid[1]));
4565e370
WB
2273 return;
2274 }
2275 }
2276 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2277 "WWN=%08X%08X\n", cfg->type_status,
b3b3b407
BK
2278 ipr_format_res_path(hostrcb->ioa_cfg,
2279 cfg->res_path, buffer, sizeof(buffer)),
2280 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2281 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
4565e370
WB
2282}
2283
49dc6a18
BK
2284/**
2285 * ipr_log_fabric_error - Log a fabric error.
2286 * @ioa_cfg: ioa config struct
2287 * @hostrcb: hostrcb struct
2288 *
2289 * Return value:
2290 * none
2291 **/
2292static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2293 struct ipr_hostrcb *hostrcb)
2294{
2295 struct ipr_hostrcb_type_20_error *error;
2296 struct ipr_hostrcb_fabric_desc *fabric;
2297 struct ipr_hostrcb_config_element *cfg;
2298 int i, add_len;
2299
2300 error = &hostrcb->hcam.u.error.u.type_20_error;
2301 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2302 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2303
2304 add_len = be32_to_cpu(hostrcb->hcam.length) -
2305 (offsetof(struct ipr_hostrcb_error, u) +
2306 offsetof(struct ipr_hostrcb_type_20_error, desc));
2307
2308 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2309 ipr_log_fabric_path(hostrcb, fabric);
2310 for_each_fabric_cfg(fabric, cfg)
2311 ipr_log_path_elem(hostrcb, cfg);
2312
2313 add_len -= be16_to_cpu(fabric->length);
2314 fabric = (struct ipr_hostrcb_fabric_desc *)
2315 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2316 }
2317
359d96e7 2318 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
49dc6a18
BK
2319}
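/*
 * A minimal, self-contained sketch (types and names hypothetical, not
 * driver code) of the variable-length record walk used above: each
 * descriptor carries its own big-endian length, and the next
 * descriptor starts that many bytes further on. Whatever is left when
 * the walk ends is dumped as raw hex. Unlike the driver loop, this
 * sketch also bounds-checks the advertised lengths.
 */
struct example_desc {
	__be16 length;		/* total bytes in this descriptor */
	/* payload follows */
};

static void example_walk_descs(void *buf, int total_len)
{
	struct example_desc *d = buf;

	while (total_len >= (int)sizeof(*d)) {
		int len = be16_to_cpu(d->length);

		if (len < (int)sizeof(*d) || len > total_len)
			break;	/* malformed entry; stop walking */
		total_len -= len;
		d = (struct example_desc *)((unsigned long)d + len);
	}
	/* total_len bytes of trailing data remain at d */
}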
2320
4565e370
WB
2321/**
2322 * ipr_log_sis64_array_error - Log a sis64 array error.
2323 * @ioa_cfg: ioa config struct
2324 * @hostrcb: hostrcb struct
2325 *
2326 * Return value:
2327 * none
2328 **/
2329static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2330 struct ipr_hostrcb *hostrcb)
2331{
2332 int i, num_entries;
2333 struct ipr_hostrcb_type_24_error *error;
2334 struct ipr_hostrcb64_array_data_entry *array_entry;
2335 char buffer[IPR_MAX_RES_PATH_LENGTH];
2336 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2337
2338 error = &hostrcb->hcam.u.error64.u.type_24_error;
2339
2340 ipr_err_separator;
2341
2342 ipr_err("RAID %s Array Configuration: %s\n",
2343 error->protection_level,
b3b3b407
BK
2344 ipr_format_res_path(ioa_cfg, error->last_res_path,
2345 buffer, sizeof(buffer)));
4565e370
WB
2346
2347 ipr_err_separator;
2348
2349 array_entry = error->array_member;
7262026f
WB
2350 num_entries = min_t(u32, error->num_entries,
2351 ARRAY_SIZE(error->array_member));
4565e370
WB
2352
2353 for (i = 0; i < num_entries; i++, array_entry++) {
2354
2355 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2356 continue;
2357
2358 if (error->exposed_mode_adn == i)
2359 ipr_err("Exposed Array Member %d:\n", i);
2360 else
2361 ipr_err("Array Member %d:\n", i);
2362
2364 ipr_log_ext_vpd(&array_entry->vpd);
7262026f 2365 ipr_err("Current Location: %s\n",
b3b3b407
BK
2366 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2367 buffer, sizeof(buffer)));
7262026f 2368 ipr_err("Expected Location: %s\n",
b3b3b407
BK
2369 ipr_format_res_path(ioa_cfg,
2370 array_entry->expected_res_path,
2371 buffer, sizeof(buffer)));
4565e370
WB
2372
2373 ipr_err_separator;
2374 }
2375}
2376
2377/**
2378 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2379 * @ioa_cfg: ioa config struct
2380 * @hostrcb: hostrcb struct
2381 *
2382 * Return value:
2383 * none
2384 **/
2385static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2386 struct ipr_hostrcb *hostrcb)
2387{
2388 struct ipr_hostrcb_type_30_error *error;
2389 struct ipr_hostrcb64_fabric_desc *fabric;
2390 struct ipr_hostrcb64_config_element *cfg;
2391 int i, add_len;
2392
2393 error = &hostrcb->hcam.u.error64.u.type_30_error;
2394
2395 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2396 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2397
2398 add_len = be32_to_cpu(hostrcb->hcam.length) -
2399 (offsetof(struct ipr_hostrcb64_error, u) +
2400 offsetof(struct ipr_hostrcb_type_30_error, desc));
2401
2402 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2403 ipr_log64_fabric_path(hostrcb, fabric);
2404 for_each_fabric_cfg(fabric, cfg)
2405 ipr_log64_path_elem(hostrcb, cfg);
2406
2407 add_len -= be16_to_cpu(fabric->length);
2408 fabric = (struct ipr_hostrcb64_fabric_desc *)
2409 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2410 }
2411
359d96e7 2412 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
4565e370
WB
2413}
2414
b0df54bb 2415/**
2416 * ipr_log_generic_error - Log an adapter error.
2417 * @ioa_cfg: ioa config struct
2418 * @hostrcb: hostrcb struct
2419 *
2420 * Return value:
2421 * none
2422 **/
2423static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2424 struct ipr_hostrcb *hostrcb)
2425{
ac719aba 2426 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
b0df54bb 2427 be32_to_cpu(hostrcb->hcam.length));
2428}
2429
169b9ec8
WX
2430/**
 2431 * ipr_log_sis64_device_error - Log a sis64 device error.
2432 * @ioa_cfg: ioa config struct
2433 * @hostrcb: hostrcb struct
2434 *
2435 * Return value:
2436 * none
2437 **/
2438static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2439 struct ipr_hostrcb *hostrcb)
2440{
2441 struct ipr_hostrcb_type_21_error *error;
2442 char buffer[IPR_MAX_RES_PATH_LENGTH];
2443
2444 error = &hostrcb->hcam.u.error64.u.type_21_error;
2445
2446 ipr_err("-----Failing Device Information-----\n");
2447 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2448 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2449 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2450 ipr_err("Device Resource Path: %s\n",
2451 __ipr_format_res_path(error->res_path,
2452 buffer, sizeof(buffer)));
2453 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2454 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2455 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2456 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2457 ipr_err("SCSI Sense Data:\n");
2458 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2459 ipr_err("SCSI Command Descriptor Block: \n");
2460 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2461
2462 ipr_err("Additional IOA Data:\n");
2463 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2464}
2465
1da177e4
LT
2466/**
 2467 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2468 * @ioasc: IOASC
2469 *
 2470 * This function will return the index into the ipr_error_table
2471 * for the specified IOASC. If the IOASC is not in the table,
2472 * 0 will be returned, which points to the entry used for unknown errors.
2473 *
2474 * Return value:
2475 * index into the ipr_error_table
2476 **/
2477static u32 ipr_get_error(u32 ioasc)
2478{
2479 int i;
2480
2481 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
35a39691 2482 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1da177e4
LT
2483 return i;
2484
2485 return 0;
2486}
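/*
 * Usage sketch for ipr_get_error(): the IOASC is masked with
 * IPR_IOASC_IOASC_MASK inside the loop, so callers pass the raw value.
 * Index 0 doubles as the catch-all "unknown error" entry, so the
 * lookup itself can never fail:
 *
 *	error_index = ipr_get_error(ioasc);
 *	if (ipr_error_table[error_index].log_hcam)
 *		ipr_hcam_err(hostrcb, "%s\n",
 *			     ipr_error_table[error_index].error);
 */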
2487
2488/**
2489 * ipr_handle_log_data - Log an adapter error.
2490 * @ioa_cfg: ioa config struct
2491 * @hostrcb: hostrcb struct
2492 *
2493 * This function logs an adapter error to the system.
2494 *
2495 * Return value:
2496 * none
2497 **/
2498static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2499 struct ipr_hostrcb *hostrcb)
2500{
2501 u32 ioasc;
2502 int error_index;
3185ea63 2503 struct ipr_hostrcb_type_21_error *error;
1da177e4
LT
2504
2505 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2506 return;
2507
2508 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2509 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2510
4565e370
WB
2511 if (ioa_cfg->sis64)
2512 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2513 else
2514 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4 2515
4565e370
WB
2516 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2517 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
1da177e4
LT
2518 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2519 scsi_report_bus_reset(ioa_cfg->host,
4565e370 2520 hostrcb->hcam.u.error.fd_res_addr.bus);
1da177e4
LT
2521 }
2522
2523 error_index = ipr_get_error(ioasc);
2524
2525 if (!ipr_error_table[error_index].log_hcam)
2526 return;
2527
3185ea63 2528 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2529 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2530 error = &hostrcb->hcam.u.error64.u.type_21_error;
2531
2532 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2533 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2534 return;
2535 }
2536
49dc6a18 2537 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
2538
2539 /* Set indication we have logged an error */
2540 ioa_cfg->errors_logged++;
2541
933916f3 2542 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1da177e4 2543 return;
cf852037 2544 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2545 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1da177e4
LT
2546
2547 switch (hostrcb->hcam.overlay_id) {
1da177e4
LT
2548 case IPR_HOST_RCB_OVERLAY_ID_2:
2549 ipr_log_cache_error(ioa_cfg, hostrcb);
2550 break;
2551 case IPR_HOST_RCB_OVERLAY_ID_3:
2552 ipr_log_config_error(ioa_cfg, hostrcb);
2553 break;
2554 case IPR_HOST_RCB_OVERLAY_ID_4:
2555 case IPR_HOST_RCB_OVERLAY_ID_6:
2556 ipr_log_array_error(ioa_cfg, hostrcb);
2557 break;
b0df54bb 2558 case IPR_HOST_RCB_OVERLAY_ID_7:
2559 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2560 break;
ee0f05b8 2561 case IPR_HOST_RCB_OVERLAY_ID_12:
2562 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2563 break;
2564 case IPR_HOST_RCB_OVERLAY_ID_13:
2565 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2566 break;
2567 case IPR_HOST_RCB_OVERLAY_ID_14:
2568 case IPR_HOST_RCB_OVERLAY_ID_16:
2569 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2570 break;
2571 case IPR_HOST_RCB_OVERLAY_ID_17:
2572 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2573 break;
49dc6a18
BK
2574 case IPR_HOST_RCB_OVERLAY_ID_20:
2575 ipr_log_fabric_error(ioa_cfg, hostrcb);
2576 break;
169b9ec8
WX
2577 case IPR_HOST_RCB_OVERLAY_ID_21:
2578 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2579 break;
4565e370
WB
2580 case IPR_HOST_RCB_OVERLAY_ID_23:
2581 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2582 break;
2583 case IPR_HOST_RCB_OVERLAY_ID_24:
2584 case IPR_HOST_RCB_OVERLAY_ID_26:
2585 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2586 break;
2587 case IPR_HOST_RCB_OVERLAY_ID_30:
2588 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2589 break;
cf852037 2590 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 2591 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 2592 default:
a9cfca96 2593 ipr_log_generic_error(ioa_cfg, hostrcb);
1da177e4
LT
2594 break;
2595 }
2596}
2597
afc3f83c
BK
2598static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2599{
2600 struct ipr_hostrcb *hostrcb;
2601
2602 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2603 struct ipr_hostrcb, queue);
2604
2605 if (unlikely(!hostrcb)) {
 2606 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2607 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2608 struct ipr_hostrcb, queue);
2609 }
2610
2611 list_del_init(&hostrcb->queue);
2612 return hostrcb;
2613}
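/*
 * Note: the list_del_init() above relies on the invariant that at
 * least one HCAM buffer is outstanding on hostrcb_report_q whenever
 * the free list is empty; if both lists could ever be empty at once,
 * hostrcb would be NULL here and the list_del_init() would oops.
 */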
2614
1da177e4
LT
2615/**
2616 * ipr_process_error - Op done function for an adapter error log.
2617 * @ipr_cmd: ipr command struct
2618 *
2619 * This function is the op done function for an error log host
2620 * controlled async from the adapter. It will log the error and
2621 * send the HCAM back to the adapter.
2622 *
2623 * Return value:
2624 * none
2625 **/
2626static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2627{
2628 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2629 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 2630 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4565e370
WB
2631 u32 fd_ioasc;
2632
2633 if (ioa_cfg->sis64)
2634 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2635 else
2636 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4 2637
afc3f83c 2638 list_del_init(&hostrcb->queue);
05a6538a 2639 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
2640
2641 if (!ioasc) {
2642 ipr_handle_log_data(ioa_cfg, hostrcb);
65f56475
BK
2643 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2644 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4fdd7c7a
BK
2645 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2646 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
1da177e4
LT
2647 dev_err(&ioa_cfg->pdev->dev,
2648 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2649 }
2650
afc3f83c 2651 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
8a4236a2 2652 schedule_work(&ioa_cfg->work_q);
afc3f83c 2653 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
afc3f83c 2654
1da177e4
LT
2655 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2656}
2657
2658/**
2659 * ipr_timeout - An internally generated op has timed out.
 2660 * @t: Timer context used to fetch the ipr command struct
2661 *
2662 * This function blocks host requests and initiates an
2663 * adapter reset.
2664 *
2665 * Return value:
2666 * none
2667 **/
738c6ec5 2668static void ipr_timeout(struct timer_list *t)
1da177e4 2669{
738c6ec5 2670 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
1da177e4
LT
2671 unsigned long lock_flags = 0;
2672 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2673
2674 ENTER;
2675 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2676
2677 ioa_cfg->errors_logged++;
2678 dev_err(&ioa_cfg->pdev->dev,
2679 "Adapter being reset due to command timeout.\n");
2680
2681 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2682 ioa_cfg->sdt_state = GET_DUMP;
2683
2684 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2685 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2686
2687 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2688 LEAVE;
2689}
2690
2691/**
2692 * ipr_oper_timeout - Adapter timed out transitioning to operational
 2693 * @t: Timer context used to fetch the ipr command struct
2694 *
2695 * This function blocks host requests and initiates an
2696 * adapter reset.
2697 *
2698 * Return value:
2699 * none
2700 **/
738c6ec5 2701static void ipr_oper_timeout(struct timer_list *t)
1da177e4 2702{
738c6ec5 2703 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
1da177e4
LT
2704 unsigned long lock_flags = 0;
2705 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2706
2707 ENTER;
2708 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2709
2710 ioa_cfg->errors_logged++;
2711 dev_err(&ioa_cfg->pdev->dev,
2712 "Adapter timed out transitioning to operational.\n");
2713
2714 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2715 ioa_cfg->sdt_state = GET_DUMP;
2716
2717 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2718 if (ipr_fastfail)
2719 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2720 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2721 }
2722
2723 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2724 LEAVE;
2725}
2726
1da177e4
LT
2727/**
2728 * ipr_find_ses_entry - Find matching SES in SES table
2729 * @res: resource entry struct of SES
2730 *
2731 * Return value:
2732 * pointer to SES table entry / NULL on failure
2733 **/
2734static const struct ipr_ses_table_entry *
2735ipr_find_ses_entry(struct ipr_resource_entry *res)
2736{
2737 int i, j, matches;
3e7ebdfa 2738 struct ipr_std_inq_vpids *vpids;
1da177e4
LT
2739 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2740
2741 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2742 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2743 if (ste->compare_product_id_byte[j] == 'X') {
3e7ebdfa
WB
2744 vpids = &res->std_inq_data.vpids;
2745 if (vpids->product_id[j] == ste->product_id[j])
1da177e4
LT
2746 matches++;
2747 else
2748 break;
2749 } else
2750 matches++;
2751 }
2752
2753 if (matches == IPR_PROD_ID_LEN)
2754 return ste;
2755 }
2756
2757 return NULL;
2758}
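/*
 * Matching convention, with a hypothetical table entry: an 'X' at
 * position j of compare_product_id_byte means "byte j of the product
 * id must match"; any other value means "byte j always matches". So a
 * compare string with no 'X' bytes matches every product id, while
 * "XXXXXXX         " pins only the first seven characters.
 */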
2759
2760/**
2761 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2762 * @ioa_cfg: ioa config struct
2763 * @bus: SCSI bus
2764 * @bus_width: bus width
2765 *
2766 * Return value:
2767 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2768 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2769 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2770 * max 160MHz = max 320MB/sec).
2771 **/
2772static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2773{
2774 struct ipr_resource_entry *res;
2775 const struct ipr_ses_table_entry *ste;
2776 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2777
2778 /* Loop through each config table entry in the config table buffer */
2779 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 2780 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
1da177e4
LT
2781 continue;
2782
3e7ebdfa 2783 if (bus != res->bus)
1da177e4
LT
2784 continue;
2785
2786 if (!(ste = ipr_find_ses_entry(res)))
2787 continue;
2788
2789 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2790 }
2791
2792 return max_xfer_rate;
2793}
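/*
 * Worked example (limit hypothetical, units as the arithmetic
 * suggests): for a 16-bit wide bus with an SES entry limiting the bus
 * to 80 MB/sec (max_bus_speed_limit == 80):
 *
 *	max_xfer_rate = (80 * 10) / (16 / 8) = 400
 *
 * i.e. 40 MHz in the 100KHz units this function returns.
 */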
2794
2795/**
2796 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2797 * @ioa_cfg: ioa config struct
2798 * @max_delay: max delay in micro-seconds to wait
2799 *
2800 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2801 *
2802 * Return value:
2803 * 0 on success / other on failure
2804 **/
2805static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2806{
2807 volatile u32 pcii_reg;
2808 int delay = 1;
2809
2810 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2811 while (delay < max_delay) {
2812 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2813
2814 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2815 return 0;
2816
2817 /* udelay cannot be used if delay is more than a few milliseconds */
2818 if ((delay / 1000) > MAX_UDELAY_MS)
2819 mdelay(delay / 1000);
2820 else
2821 udelay(delay);
2822
2823 delay += delay;
2824 }
2825 return -EIO;
2826}
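/*
 * Note on the polling loop above: "delay += delay" doubles the wait
 * each pass (1, 2, 4, ... microseconds), so the total time busy-waited
 * before giving up is bounded by roughly 2 * max_delay. The same
 * backoff shape in isolation:
 *
 *	int delay = 1;
 *	while (delay < max_delay) {
 *		if (ack_received())	// hypothetical condition
 *			return 0;
 *		udelay(delay);
 *		delay += delay;
 *	}
 *	return -EIO;
 */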
2827
dcbad00e
WB
2828/**
2829 * ipr_get_sis64_dump_data_section - Dump IOA memory
2830 * @ioa_cfg: ioa config struct
2831 * @start_addr: adapter address to dump
2832 * @dest: destination kernel buffer
2833 * @length_in_words: length to dump in 4 byte words
2834 *
2835 * Return value:
2836 * 0 on success
2837 **/
2838static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2839 u32 start_addr,
2840 __be32 *dest, u32 length_in_words)
2841{
2842 int i;
2843
2844 for (i = 0; i < length_in_words; i++) {
2845 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2846 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2847 dest++;
2848 }
2849
2850 return 0;
2851}
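/*
 * The dump routine above uses a classic indirect-access window: write
 * the adapter-side address into one register, then read the selected
 * word back through a data register. The idiom in isolation (register
 * names hypothetical):
 *
 *	writel(addr, regs->dump_addr_reg);		// select address
 *	val = cpu_to_be32(readl(regs->dump_data_reg));	// fetch word
 */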
2852
1da177e4
LT
2853/**
2854 * ipr_get_ldump_data_section - Dump IOA memory
2855 * @ioa_cfg: ioa config struct
2856 * @start_addr: adapter address to dump
2857 * @dest: destination kernel buffer
2858 * @length_in_words: length to dump in 4 byte words
2859 *
2860 * Return value:
2861 * 0 on success / -EIO on failure
2862 **/
2863static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2864 u32 start_addr,
2865 __be32 *dest, u32 length_in_words)
2866{
2867 volatile u32 temp_pcii_reg;
2868 int i, delay = 0;
2869
dcbad00e
WB
2870 if (ioa_cfg->sis64)
2871 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2872 dest, length_in_words);
2873
1da177e4
LT
2874 /* Write IOA interrupt reg starting LDUMP state */
2875 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
214777ba 2876 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2877
2878 /* Wait for IO debug acknowledge */
2879 if (ipr_wait_iodbg_ack(ioa_cfg,
2880 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2881 dev_err(&ioa_cfg->pdev->dev,
2882 "IOA dump long data transfer timeout\n");
2883 return -EIO;
2884 }
2885
2886 /* Signal LDUMP interlocked - clear IO debug ack */
2887 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2888 ioa_cfg->regs.clr_interrupt_reg);
2889
2890 /* Write Mailbox with starting address */
2891 writel(start_addr, ioa_cfg->ioa_mailbox);
2892
2893 /* Signal address valid - clear IOA Reset alert */
2894 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2895 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2896
2897 for (i = 0; i < length_in_words; i++) {
2898 /* Wait for IO debug acknowledge */
2899 if (ipr_wait_iodbg_ack(ioa_cfg,
2900 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2901 dev_err(&ioa_cfg->pdev->dev,
2902 "IOA dump short data transfer timeout\n");
2903 return -EIO;
2904 }
2905
2906 /* Read data from mailbox and increment destination pointer */
2907 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2908 dest++;
2909
2910 /* For all but the last word of data, signal data received */
2911 if (i < (length_in_words - 1)) {
2912 /* Signal dump data received - Clear IO debug Ack */
2913 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2914 ioa_cfg->regs.clr_interrupt_reg);
2915 }
2916 }
2917
2918 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2919 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2920 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2921
2922 writel(IPR_UPROCI_IO_DEBUG_ALERT,
214777ba 2923 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2924
2925 /* Signal dump data received - Clear IO debug Ack */
2926 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2927 ioa_cfg->regs.clr_interrupt_reg);
2928
2929 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2930 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2931 temp_pcii_reg =
214777ba 2932 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
1da177e4
LT
2933
2934 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2935 return 0;
2936
2937 udelay(10);
2938 delay += 10;
2939 }
2940
2941 return 0;
2942}
2943
2944#ifdef CONFIG_SCSI_IPR_DUMP
2945/**
2946 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2947 * @ioa_cfg: ioa config struct
2948 * @pci_address: adapter address
2949 * @length: length of data to copy
2950 *
2951 * Copy data from PCI adapter to kernel buffer.
2952 * Note: length MUST be a 4 byte multiple
2953 * Return value:
2954 * 0 on success / other on failure
2955 **/
2956static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2957 unsigned long pci_address, u32 length)
2958{
2959 int bytes_copied = 0;
4d4dd706 2960 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
1da177e4
LT
2961 __be32 *page;
2962 unsigned long lock_flags = 0;
2963 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2964
4d4dd706
KSS
2965 if (ioa_cfg->sis64)
2966 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2967 else
2968 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2969
1da177e4 2970 while (bytes_copied < length &&
4d4dd706 2971 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
1da177e4
LT
2972 if (ioa_dump->page_offset >= PAGE_SIZE ||
2973 ioa_dump->page_offset == 0) {
2974 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2975
2976 if (!page) {
2977 ipr_trace;
2978 return bytes_copied;
2979 }
2980
2981 ioa_dump->page_offset = 0;
2982 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2983 ioa_dump->next_page_index++;
2984 } else
2985 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2986
2987 rem_len = length - bytes_copied;
2988 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2989 cur_len = min(rem_len, rem_page_len);
2990
2991 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2992 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2993 rc = -EIO;
2994 } else {
2995 rc = ipr_get_ldump_data_section(ioa_cfg,
2996 pci_address + bytes_copied,
2997 &page[ioa_dump->page_offset / 4],
2998 (cur_len / sizeof(u32)));
2999 }
3000 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3001
3002 if (!rc) {
3003 ioa_dump->page_offset += cur_len;
3004 bytes_copied += cur_len;
3005 } else {
3006 ipr_trace;
3007 break;
3008 }
3009 schedule();
3010 }
3011
3012 return bytes_copied;
3013}
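/*
 * Chunking in the copy loop above: each pass copies
 * min(bytes left in the request, bytes left in the current page), so a
 * single chunk never straddles a page boundary. E.g. with a 4096-byte
 * page, page_offset == 4000 and 600 bytes still to copy, the next
 * chunk is min(600, 96) = 96 bytes, after which a fresh page is
 * allocated for the remaining 504.
 */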
3014
3015/**
3016 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3017 * @hdr: dump entry header struct
3018 *
3019 * Return value:
3020 * nothing
3021 **/
3022static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3023{
3024 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3025 hdr->num_elems = 1;
3026 hdr->offset = sizeof(*hdr);
3027 hdr->status = IPR_DUMP_STATUS_SUCCESS;
3028}
3029
3030/**
3031 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3032 * @ioa_cfg: ioa config struct
3033 * @driver_dump: driver dump struct
3034 *
3035 * Return value:
3036 * nothing
3037 **/
3038static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3039 struct ipr_driver_dump *driver_dump)
3040{
3041 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3042
3043 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3044 driver_dump->ioa_type_entry.hdr.len =
3045 sizeof(struct ipr_dump_ioa_type_entry) -
3046 sizeof(struct ipr_dump_entry_header);
3047 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3048 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3049 driver_dump->ioa_type_entry.type = ioa_cfg->type;
3050 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3051 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3052 ucode_vpd->minor_release[1];
3053 driver_dump->hdr.num_entries++;
3054}
3055
3056/**
3057 * ipr_dump_version_data - Fill in the driver version in the dump.
3058 * @ioa_cfg: ioa config struct
3059 * @driver_dump: driver dump struct
3060 *
3061 * Return value:
3062 * nothing
3063 **/
3064static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3065 struct ipr_driver_dump *driver_dump)
3066{
3067 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3068 driver_dump->version_entry.hdr.len =
3069 sizeof(struct ipr_dump_version_entry) -
3070 sizeof(struct ipr_dump_entry_header);
3071 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3072 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3073 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3074 driver_dump->hdr.num_entries++;
3075}
3076
3077/**
3078 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3079 * @ioa_cfg: ioa config struct
3080 * @driver_dump: driver dump struct
3081 *
3082 * Return value:
3083 * nothing
3084 **/
3085static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3086 struct ipr_driver_dump *driver_dump)
3087{
3088 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3089 driver_dump->trace_entry.hdr.len =
3090 sizeof(struct ipr_dump_trace_entry) -
3091 sizeof(struct ipr_dump_entry_header);
3092 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3093 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3094 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3095 driver_dump->hdr.num_entries++;
3096}
3097
3098/**
3099 * ipr_dump_location_data - Fill in the IOA location in the dump.
3100 * @ioa_cfg: ioa config struct
3101 * @driver_dump: driver dump struct
3102 *
3103 * Return value:
3104 * nothing
3105 **/
3106static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3107 struct ipr_driver_dump *driver_dump)
3108{
3109 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3110 driver_dump->location_entry.hdr.len =
3111 sizeof(struct ipr_dump_location_entry) -
3112 sizeof(struct ipr_dump_entry_header);
3113 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3114 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
71610f55 3115 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
1da177e4
LT
3116 driver_dump->hdr.num_entries++;
3117}
3118
3119/**
3120 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3121 * @ioa_cfg: ioa config struct
3122 * @dump: dump struct
3123 *
3124 * Return value:
3125 * nothing
3126 **/
3127static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3128{
3129 unsigned long start_addr, sdt_word;
3130 unsigned long lock_flags = 0;
3131 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3132 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
4d4dd706
KSS
3133 u32 num_entries, max_num_entries, start_off, end_off;
3134 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
1da177e4 3135 struct ipr_sdt *sdt;
dcbad00e 3136 int valid = 1;
1da177e4
LT
3137 int i;
3138
3139 ENTER;
3140
3141 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3142
41e9a696 3143 if (ioa_cfg->sdt_state != READ_DUMP) {
1da177e4
LT
3144 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3145 return;
3146 }
3147
110def85
WB
3148 if (ioa_cfg->sis64) {
3149 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3150 ssleep(IPR_DUMP_DELAY_SECONDS);
3151 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3152 }
3153
1da177e4
LT
3154 start_addr = readl(ioa_cfg->ioa_mailbox);
3155
dcbad00e 3156 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
1da177e4
LT
3157 dev_err(&ioa_cfg->pdev->dev,
3158 "Invalid dump table format: %lx\n", start_addr);
3159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3160 return;
3161 }
3162
3163 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3164
3165 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3166
3167 /* Initialize the overall dump header */
3168 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3169 driver_dump->hdr.num_entries = 1;
3170 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3171 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3172 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3173 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3174
3175 ipr_dump_version_data(ioa_cfg, driver_dump);
3176 ipr_dump_location_data(ioa_cfg, driver_dump);
3177 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3178 ipr_dump_trace_data(ioa_cfg, driver_dump);
3179
3180 /* Update dump_header */
3181 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3182
3183 /* IOA Dump entry */
3184 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1da177e4
LT
3185 ioa_dump->hdr.len = 0;
3186 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3187 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3188
 3189 /* The first entries in the sdt are actually a list of dump addresses
 3190 and lengths used to gather the real dump data. sdt points to the
 3191 IOA-generated dump table. Dump data will be extracted based on
 3192 entries in this table. */
3193 sdt = &ioa_dump->sdt;
3194
4d4dd706
KSS
3195 if (ioa_cfg->sis64) {
3196 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3197 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3198 } else {
3199 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3200 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3201 }
3202
3203 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3204 (max_num_entries * sizeof(struct ipr_sdt_entry));
1da177e4 3205 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
4d4dd706 3206 bytes_to_copy / sizeof(__be32));
1da177e4
LT
3207
3208 /* Smart Dump table is ready to use and the first entry is valid */
dcbad00e
WB
3209 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3210 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
3211 dev_err(&ioa_cfg->pdev->dev,
3212 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3213 rc, be32_to_cpu(sdt->hdr.state));
3214 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3215 ioa_cfg->sdt_state = DUMP_OBTAINED;
3216 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3217 return;
3218 }
3219
3220 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3221
4d4dd706
KSS
3222 if (num_entries > max_num_entries)
3223 num_entries = max_num_entries;
3224
3225 /* Update dump length to the actual data to be copied */
3226 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3227 if (ioa_cfg->sis64)
3228 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3229 else
3230 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
1da177e4
LT
3231
3232 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3233
3234 for (i = 0; i < num_entries; i++) {
4d4dd706 3235 if (ioa_dump->hdr.len > max_dump_size) {
1da177e4
LT
3236 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3237 break;
3238 }
3239
3240 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
dcbad00e
WB
3241 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3242 if (ioa_cfg->sis64)
3243 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3244 else {
3245 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3246 end_off = be32_to_cpu(sdt->entry[i].end_token);
3247
3248 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3249 bytes_to_copy = end_off - start_off;
3250 else
3251 valid = 0;
3252 }
3253 if (valid) {
4d4dd706 3254 if (bytes_to_copy > max_dump_size) {
1da177e4
LT
3255 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3256 continue;
3257 }
3258
3259 /* Copy data from adapter to driver buffers */
3260 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3261 bytes_to_copy);
3262
3263 ioa_dump->hdr.len += bytes_copied;
3264
3265 if (bytes_copied != bytes_to_copy) {
3266 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3267 break;
3268 }
3269 }
3270 }
3271 }
3272
3273 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3274
3275 /* Update dump_header */
3276 driver_dump->hdr.len += ioa_dump->hdr.len;
3277 wmb();
3278 ioa_cfg->sdt_state = DUMP_OBTAINED;
3279 LEAVE;
3280}
3281
3282#else
203fa3fe 3283#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
1da177e4
LT
3284#endif
3285
3286/**
3287 * ipr_release_dump - Free adapter dump memory
3288 * @kref: kref struct
3289 *
3290 * Return value:
3291 * nothing
3292 **/
3293static void ipr_release_dump(struct kref *kref)
3294{
203fa3fe 3295 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
1da177e4
LT
3296 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3297 unsigned long lock_flags = 0;
3298 int i;
3299
3300 ENTER;
3301 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3302 ioa_cfg->dump = NULL;
3303 ioa_cfg->sdt_state = INACTIVE;
3304 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3305
3306 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3307 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3308
4d4dd706 3309 vfree(dump->ioa_dump.ioa_data);
1da177e4
LT
3310 kfree(dump);
3311 LEAVE;
3312}

/**
 * ipr_worker_thread - Worker thread
 * @work:	work struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing device from the mid-layer as configuration
 * changes are detected by the adapter.
 *
 * Return value:
 *	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == READ_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	if (ioa_cfg->scsi_unblock) {
		ioa_cfg->scsi_unblock = 0;
		ioa_cfg->scsi_blocked = 0;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		scsi_unblock_requests(ioa_cfg->host);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->scsi_blocked)
			scsi_block_requests(ioa_cfg->host);
	}

	if (!ioa_cfg->scan_enabled) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

restart:
	do {
		did_work = 0;
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				if (!scsi_device_get(sdev)) {
					if (!res->add_to_ml)
						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					else
						res->del_from_ml = 0;
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				break;
			}
		}
	} while (did_work);

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->bus;
			target = res->target;
			lun = res->lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	ioa_cfg->scan_done = 1;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
	LEAVE;
}

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	ssize_t ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
				      IPR_TRACE_SIZE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return ret;
}

static struct bin_attribute ipr_trace_attr = {
	.attr = {
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
#endif
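
/*
 * Usage sketch (illustrative, not part of the driver): when
 * CONFIG_SCSI_IPR_TRACE is enabled, the trace buffer above is exposed as a
 * read-only binary sysfs attribute on the Scsi_Host class device. Assuming
 * the adapter is host0 (the exact sysfs path depends on kernel version and
 * topology), it can be captured from userspace with, e.g.:
 *
 *	dd if=/sys/class/scsi_host/host0/trace of=/tmp/ipr_trace.bin
 */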

/**
 * ipr_show_fw_version - Show the firmware version
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_fw_version_attr = {
	.attr = {
		.name = "fw_version",
		.mode = S_IRUGO,
	},
	.show = ipr_show_fw_version,
};

/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes consumed from buffer
 **/
static ssize_t ipr_store_log_level(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return strlen(buf);
}

static struct device_attribute ipr_log_level_attr = {
	.attr = {
		.name = "log_level",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};
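
/*
 * Usage sketch (illustrative, not part of the driver): log_level is a plain
 * text sysfs attribute on the Scsi_Host class device, so the driver's error
 * logging verbosity can be inspected and changed at runtime, e.g. (assuming
 * host0):
 *
 *	cat /sys/class/scsi_host/host0/log_level
 *	echo 4 > /sys/class/scsi_host/host0/log_level
 */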

/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

static struct device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name = "run_diagnostics",
		.mode = S_IWUSR,
	},
	.store = ipr_store_diagnostics
};

/**
 * ipr_show_adapter_state - Show the adapter's state
 * @dev:	device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
	else
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_adapter_state - Change adapter state
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count, i;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
	    !strncmp(buf, "online", 6)) {
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		wmb();
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name = "online_state",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};

/**
 * ipr_store_reset_adapter - Reset the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name = "reset_host",
		.mode = S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};

static int ipr_iopoll(struct irq_poll *iop, int budget);

/**
 * ipr_show_iopoll_weight - Show ipr polling mode
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_iopoll_weight(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

	return len;
}

/**
 * ipr_store_iopoll_weight - Change the adapter's polling mode
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes consumed from buffer
 **/
static ssize_t ipr_store_iopoll_weight(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long user_iopoll_weight;
	unsigned long lock_flags = 0;
	int i;

	if (!ioa_cfg->sis64) {
		dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
		return -EINVAL;
	}
	if (kstrtoul(buf, 10, &user_iopoll_weight))
		return -EINVAL;

	if (user_iopoll_weight > 256) {
		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
		return -EINVAL;
	}

	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
		dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
		return strlen(buf);
	}

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	spin_lock_irqsave(shost->host_lock, lock_flags);
	ioa_cfg->iopoll_weight = user_iopoll_weight;
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
				      ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

	return strlen(buf);
}

static struct device_attribute ipr_iopoll_weight_attr = {
	.attr = {
		.name = "iopoll_weight",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_iopoll_weight,
	.store = ipr_store_iopoll_weight
};
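
/*
 * Usage sketch (illustrative, not part of the driver): on SIS-64 adapters
 * with multiple MSI-X vectors, irq_poll based completion processing can be
 * tuned or disabled at runtime through this attribute, e.g. (assuming
 * host0):
 *
 *	echo 64 > /sys/class/scsi_host/host0/iopoll_weight   # poll budget 64
 *	echo 0  > /sys/class/scsi_host/host0/iopoll_weight   # pure IRQ mode
 */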

/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:	buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 *	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order;
	struct ipr_sglist *sglist;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}
	sglist->order = order;
	sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
					      &sglist->num_sg);
	if (!sglist->scatterlist) {
		kfree(sglist);
		return NULL;
	}

	return sglist;
}
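
/*
 * Worked example (illustrative): assuming IPR_MAX_SGLIST is 64 and a 4 MiB
 * microcode image, sg_size = 4194304 / 63 = 66576 bytes, which get_order()
 * rounds up to order 5 (32 pages = 128 KiB with 4 KiB pages).
 * sgl_alloc_order() then builds a scatterlist of 128 KiB chunks covering
 * the whole image, keeping the element count safely below IPR_MAX_SGLIST.
 */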

/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:	scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	sgl_free_order(sglist->scatterlist, sglist->order);
	kfree(sglist);
}

/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:	scatter/gather list pointer
 * @buffer:	buffer pointer
 * @len:	buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;

		if (result != 0) {
			ipr_trace;
			return result;
		}
	}

	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}

/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
					sglist->scatterlist, sglist->num_sg,
					DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	char *src;
	char *endline;
	int result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	snprintf(fname, sizeof(fname), "%s", buf);

	endline = strchr(fname, '\n');
	if (endline)
		*endline = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name = "update_fw",
		.mode = S_IWUSR,
	},
	.store = ipr_store_update_fw
};
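
/*
 * Usage sketch (illustrative, not part of the driver): the attribute takes
 * the name of a firmware image that request_firmware() can resolve, i.e. a
 * file under /lib/firmware. Assuming host0 and a hypothetical image name:
 *
 *	echo ibm-ipr-ucode.img > /sys/class/scsi_host/host0/update_fw
 *
 * The write blocks until the download and the subsequent adapter reset
 * complete.
 */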

/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name = "fw_type",
		.mode = S_IRUGO,
	},
	.show = ipr_show_fw_type
};

static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
				      struct bin_attribute *bin_attr, char *buf,
				      loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_hostrcb *hostrcb;
	unsigned long lock_flags = 0;
	int ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
					   struct ipr_hostrcb, queue);
	if (!hostrcb) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
				      sizeof(hostrcb->hcam));
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return ret;
}

static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
				      struct bin_attribute *bin_attr, char *buf,
				      loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_hostrcb *hostrcb;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
					   struct ipr_hostrcb, queue);
	if (!hostrcb) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return count;
	}

	/* Reclaim hostrcb before exit */
	list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return count;
}

static struct bin_attribute ipr_ioa_async_err_log = {
	.attr = {
		.name = "async_err_log",
		.mode = S_IRUGO | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_async_err_log,
	.write = ipr_next_async_err_log
};
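
/*
 * Usage sketch (illustrative, not part of the driver): async_err_log is a
 * consumer-style interface. A read returns the oldest pending HCAM error
 * buffer (or EOF when none is queued); any write acknowledges it, moving it
 * back to the free queue so the next one becomes readable, e.g.:
 *
 *	dd if=/sys/class/scsi_host/host0/async_err_log of=err.bin
 *	echo 1 > /sys/class/scsi_host/host0/async_err_log
 */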

static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	&ipr_iopoll_weight_attr,
	NULL,
};

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len, sdt_end;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
	else
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sdt_end;

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}

/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	__be32 **ioa_data;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	if (ioa_cfg->sis64)
		ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
					      sizeof(__be32 *)));
	else
		ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
					      sizeof(__be32 *)));

	if (!ioa_data) {
		ipr_err("Dump memory allocation failed\n");
		kfree(dump);
		return -ENOMEM;
	}

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	LEAVE;
	return 0;
}

/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr = {
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
#endif
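
/*
 * Usage sketch (illustrative, not part of the driver): with
 * CONFIG_SCSI_IPR_DUMP enabled, a typical dump sequence from userspace is
 * to arm dump collection, read the dump after the adapter has failed and
 * the driver has harvested it, then release the memory, e.g.:
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump    # allocate/arm dump
 *	dd if=/sys/class/scsi_host/host0/dump of=ipr_dump.bin
 *	echo 0 > /sys/class/scsi_host/host0/dump    # free dump memory
 */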

/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}

/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = "adapter_handle",
		.mode = S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};

/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       __ipr_format_res_path(res->res_path, buffer,
						     sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name = "resource_path",
		.mode = S_IRUGO,
	},
	.show = ipr_show_resource_path
};

/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name = "device_id",
		.mode = S_IRUGO,
	},
	.show = ipr_show_device_id
};

/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name = "resource_type",
		.mode = S_IRUGO,
	},
	.show = ipr_show_resource_type
};

/**
 * ipr_show_raw_mode - Show the device's raw mode
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_raw_mode(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
	else
		len = -ENXIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_raw_mode - Change the device's raw mode
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes consumed from buffer
 **/
static ssize_t ipr_store_raw_mode(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res)) {
			res->raw_mode = simple_strtoul(buf, NULL, 10);
			len = strlen(buf);
			if (res->sdev)
				sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
					res->raw_mode ? "enabled" : "disabled");
		} else
			len = -EINVAL;
	} else
		len = -ENXIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_raw_mode_attr = {
	.attr = {
		.name = "raw_mode",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_raw_mode,
	.store = ipr_store_raw_mode
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	&ipr_raw_mode_attr,
	NULL,
};

/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:	scsi device struct
 * @block_device:	block device pointer
 * @capacity:	capacity of the device
 * @parm:	Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
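
/*
 * Worked example (illustrative): with the fixed 128-head, 32-sector
 * geometry, each cylinder spans 128 * 32 = 4096 sectors. At 512 bytes per
 * sector that is 2 MiB per cylinder, so any partition starting on a
 * cylinder boundary is automatically aligned to 4 KiB (and well beyond),
 * which is the alignment the IOA prefers.
 */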
4736
35a39691
BK
4737/**
4738 * ipr_find_starget - Find target based on bus/target.
4739 * @starget: scsi target struct
4740 *
4741 * Return value:
4742 * resource entry pointer if found / NULL if not found
4743 **/
4744static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4745{
4746 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4747 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4748 struct ipr_resource_entry *res;
4749
4750 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 4751 if ((res->bus == starget->channel) &&
0ee1d714 4752 (res->target == starget->id)) {
35a39691
BK
4753 return res;
4754 }
4755 }
4756
4757 return NULL;
4758}
4759
4760static struct ata_port_info sata_port_info;
4761
4762/**
4763 * ipr_target_alloc - Prepare for commands to a SCSI target
4764 * @starget: scsi target struct
4765 *
4766 * If the device is a SATA device, this function allocates an
4767 * ATA port with libata, else it does nothing.
4768 *
4769 * Return value:
4770 * 0 on success / non-0 on failure
4771 **/
4772static int ipr_target_alloc(struct scsi_target *starget)
4773{
4774 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4775 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4776 struct ipr_sata_port *sata_port;
4777 struct ata_port *ap;
4778 struct ipr_resource_entry *res;
4779 unsigned long lock_flags;
4780
4781 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4782 res = ipr_find_starget(starget);
4783 starget->hostdata = NULL;
4784
4785 if (res && ipr_is_gata(res)) {
4786 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4787 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4788 if (!sata_port)
4789 return -ENOMEM;
4790
4791 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4792 if (ap) {
4793 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4794 sata_port->ioa_cfg = ioa_cfg;
4795 sata_port->ap = ap;
4796 sata_port->res = res;
4797
4798 res->sata_port = sata_port;
4799 ap->private_data = sata_port;
4800 starget->hostdata = sata_port;
4801 } else {
4802 kfree(sata_port);
4803 return -ENOMEM;
4804 }
4805 }
4806 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4807
4808 return 0;
4809}
4810
4811/**
4812 * ipr_target_destroy - Destroy a SCSI target
4813 * @starget: scsi target struct
4814 *
4815 * If the device was a SATA device, this function frees the libata
4816 * ATA port, else it does nothing.
4817 *
4818 **/
4819static void ipr_target_destroy(struct scsi_target *starget)
4820{
4821 struct ipr_sata_port *sata_port = starget->hostdata;
3e7ebdfa
WB
4822 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4823 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4824
4825 if (ioa_cfg->sis64) {
0ee1d714
BK
4826 if (!ipr_find_starget(starget)) {
4827 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4828 clear_bit(starget->id, ioa_cfg->array_ids);
4829 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4830 clear_bit(starget->id, ioa_cfg->vset_ids);
4831 else if (starget->channel == 0)
4832 clear_bit(starget->id, ioa_cfg->target_ids);
4833 }
3e7ebdfa 4834 }
35a39691
BK
4835
4836 if (sata_port) {
4837 starget->hostdata = NULL;
4838 ata_sas_port_destroy(sata_port->ap);
4839 kfree(sata_port);
4840 }
4841}
4842
4843/**
4844 * ipr_find_sdev - Find device based on bus/target/lun.
4845 * @sdev: scsi device struct
4846 *
4847 * Return value:
4848 * resource entry pointer if found / NULL if not found
4849 **/
4850static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4851{
4852 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4853 struct ipr_resource_entry *res;
4854
4855 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa
WB
4856 if ((res->bus == sdev->channel) &&
4857 (res->target == sdev->id) &&
4858 (res->lun == sdev->lun))
35a39691
BK
4859 return res;
4860 }
4861
4862 return NULL;
4863}
4864
1da177e4
LT
4865/**
4866 * ipr_slave_destroy - Unconfigure a SCSI device
4867 * @sdev: scsi device struct
4868 *
4869 * Return value:
4870 * nothing
4871 **/
4872static void ipr_slave_destroy(struct scsi_device *sdev)
4873{
4874 struct ipr_resource_entry *res;
4875 struct ipr_ioa_cfg *ioa_cfg;
4876 unsigned long lock_flags = 0;
4877
4878 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4879
4880 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4881 res = (struct ipr_resource_entry *) sdev->hostdata;
4882 if (res) {
35a39691 4883 if (res->sata_port)
3e4ec344 4884 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
1da177e4
LT
4885 sdev->hostdata = NULL;
4886 res->sdev = NULL;
35a39691 4887 res->sata_port = NULL;
1da177e4
LT
4888 }
4889 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4890}
4891
4892/**
4893 * ipr_slave_configure - Configure a SCSI device
4894 * @sdev: scsi device struct
4895 *
4896 * This function configures the specified scsi device.
4897 *
4898 * Return value:
4899 * 0 on success
4900 **/
4901static int ipr_slave_configure(struct scsi_device *sdev)
4902{
4903 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4904 struct ipr_resource_entry *res;
dd406ef8 4905 struct ata_port *ap = NULL;
1da177e4 4906 unsigned long lock_flags = 0;
3e7ebdfa 4907 char buffer[IPR_MAX_RES_PATH_LENGTH];
1da177e4
LT
4908
4909 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4910 res = sdev->hostdata;
4911 if (res) {
4912 if (ipr_is_af_dasd_device(res))
4913 sdev->type = TYPE_RAID;
0726ce26 4914 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 4915 sdev->scsi_level = 4;
0726ce26 4916 sdev->no_uld_attach = 1;
4917 }
1da177e4 4918 if (ipr_is_vset_device(res)) {
60654e25 4919 sdev->scsi_level = SCSI_SPC_3;
723cd772 4920 sdev->no_report_opcodes = 1;
242f9dcb
JA
4921 blk_queue_rq_timeout(sdev->request_queue,
4922 IPR_VSET_RW_TIMEOUT);
086fa5ff 4923 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
1da177e4 4924 }
dd406ef8
BK
4925 if (ipr_is_gata(res) && res->sata_port)
4926 ap = res->sata_port->ap;
4927 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4928
4929 if (ap) {
db5ed4df 4930 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
dd406ef8 4931 ata_sas_slave_configure(sdev, ap);
c8b09f6f
CH
4932 }
4933
3e7ebdfa
WB
4934 if (ioa_cfg->sis64)
4935 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
b3b3b407
BK
4936 ipr_format_res_path(ioa_cfg,
4937 res->res_path, buffer, sizeof(buffer)));
dd406ef8 4938 return 0;
1da177e4
LT
4939 }
4940 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4941 return 0;
4942}
4943
35a39691
BK
4944/**
4945 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4946 * @sdev: scsi device struct
4947 *
4948 * This function initializes an ATA port so that future commands
4949 * sent through queuecommand will work.
4950 *
4951 * Return value:
4952 * 0 on success
4953 **/
4954static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4955{
4956 struct ipr_sata_port *sata_port = NULL;
4957 int rc = -ENXIO;
4958
4959 ENTER;
4960 if (sdev->sdev_target)
4961 sata_port = sdev->sdev_target->hostdata;
b2024459 4962 if (sata_port) {
35a39691 4963 rc = ata_sas_port_init(sata_port->ap);
b2024459
DW
4964 if (rc == 0)
4965 rc = ata_sas_sync_probe(sata_port->ap);
4966 }
4967
35a39691
BK
4968 if (rc)
4969 ipr_slave_destroy(sdev);
4970
4971 LEAVE;
4972 return rc;
4973}
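/*
 * Note (hedged): ata_sas_port_init() and ata_sas_sync_probe() are the
 * libata helpers for SATA ports that sit behind a SCSI host like this
 * one, where the midlayer rather than libata owns device scanning.  On
 * any failure the sdev is torn down via ipr_slave_destroy() so no stale
 * sata_port/resource pointers are left behind.
 */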
4974
1da177e4
LT
4975/**
4976 * ipr_slave_alloc - Prepare for commands to a device.
4977 * @sdev: scsi device struct
4978 *
4979 * This function saves a pointer to the resource entry
4980 * in the scsi device struct if the device exists. We
4981 * can then use this pointer in ipr_queuecommand when
4982 * handling new commands.
4983 *
4984 * Return value:
692aebfc 4985 * 0 on success / -ENXIO if device does not exist
1da177e4
LT
4986 **/
4987static int ipr_slave_alloc(struct scsi_device *sdev)
4988{
4989 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4990 struct ipr_resource_entry *res;
4991 unsigned long lock_flags;
692aebfc 4992 int rc = -ENXIO;
1da177e4
LT
4993
4994 sdev->hostdata = NULL;
4995
4996 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4997
35a39691
BK
4998 res = ipr_find_sdev(sdev);
4999 if (res) {
5000 res->sdev = sdev;
5001 res->add_to_ml = 0;
5002 res->in_erp = 0;
5003 sdev->hostdata = res;
5004 if (!ipr_is_naca_model(res))
5005 res->needs_sync_complete = 1;
5006 rc = 0;
5007 if (ipr_is_gata(res)) {
5008 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5009 return ipr_ata_slave_alloc(sdev);
1da177e4
LT
5010 }
5011 }
5012
5013 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5014
692aebfc 5015 return rc;
1da177e4
LT
5016}
5017
6cdb0817
BK
5018/**
5019 * ipr_match_lun - Match function for specified LUN
5020 * @ipr_cmd: ipr command struct
5021 * @device: device to match (sdev)
5022 *
5023 * Returns:
5024 * 1 if command matches sdev / 0 if command does not match sdev
5025 **/
5026static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5027{
5028 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5029 return 1;
5030 return 0;
5031}
5032
439ae285
BK
5033/**
5034 * ipr_cmnd_is_free - Check if a command is free or not
5035 * @ipr_cmd:	ipr command struct
5036 *
5037 * Returns:
5038 * true / false
5039 **/
5040static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5041{
5042 struct ipr_cmnd *loop_cmd;
5043
5044 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5045 if (loop_cmd == ipr_cmd)
5046 return true;
5047 }
5048
5049 return false;
5050}
5051
ef97d8ae
BK
5052/**
5053 * ipr_match_res - Match function for specified resource entry
5054 * @ipr_cmd: ipr command struct
5055 * @resource: resource entry to match
5056 *
5057 * Returns:
5058 * 1 if command matches the resource entry / 0 if it does not
5059 **/
5060static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5061{
5062 struct ipr_resource_entry *res = resource;
5063
5064 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5065 return 1;
5066 return 0;
5067}
5068
6cdb0817
BK
5069/**
5070 * ipr_wait_for_ops - Wait for matching commands to complete
5071 * @ioa_cfg:	ioa config struct
5072 * @device: device to match (sdev)
5073 * @match: match function to use
5074 *
5075 * Returns:
5076 * SUCCESS / FAILED
5077 **/
5078static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5079 int (*match)(struct ipr_cmnd *, void *))
5080{
5081 struct ipr_cmnd *ipr_cmd;
439ae285 5082 int wait, i;
6cdb0817
BK
5083 unsigned long flags;
5084 struct ipr_hrr_queue *hrrq;
5085 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5086 DECLARE_COMPLETION_ONSTACK(comp);
5087
5088 ENTER;
5089 do {
5090 wait = 0;
5091
5092 for_each_hrrq(hrrq, ioa_cfg) {
5093 spin_lock_irqsave(hrrq->lock, flags);
439ae285
BK
5094 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5095 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5096 if (!ipr_cmnd_is_free(ipr_cmd)) {
5097 if (match(ipr_cmd, device)) {
5098 ipr_cmd->eh_comp = &comp;
5099 wait++;
5100 }
6cdb0817
BK
5101 }
5102 }
5103 spin_unlock_irqrestore(hrrq->lock, flags);
5104 }
5105
5106 if (wait) {
5107 timeout = wait_for_completion_timeout(&comp, timeout);
5108
5109 if (!timeout) {
5110 wait = 0;
5111
5112 for_each_hrrq(hrrq, ioa_cfg) {
5113 spin_lock_irqsave(hrrq->lock, flags);
439ae285
BK
5114 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5115 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5116 if (!ipr_cmnd_is_free(ipr_cmd)) {
5117 if (match(ipr_cmd, device)) {
5118 ipr_cmd->eh_comp = NULL;
5119 wait++;
5120 }
6cdb0817
BK
5121 }
5122 }
5123 spin_unlock_irqrestore(hrrq->lock, flags);
5124 }
5125
5126 if (wait)
5127 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5128 LEAVE;
5129 return wait ? FAILED : SUCCESS;
5130 }
5131 }
5132 } while (wait);
5133
5134 LEAVE;
5135 return SUCCESS;
5136}
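/*
 * Illustrative sketch (not new driver code): the error handlers pair
 * ipr_wait_for_ops() with one of the match functions above.  For
 * example, ipr_eh_abort() below effectively does
 *
 *	rc = ipr_cancel_op(scsi_cmd);
 *	if (rc == SUCCESS)
 *		rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device,
 *				      ipr_match_lun);
 *
 * while the SATA reset path matches on the resource entry instead:
 *
 *	rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
 */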
5137
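/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/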
70233ac5 5138static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
1da177e4
LT
5139{
5140 struct ipr_ioa_cfg *ioa_cfg;
70233ac5 5141 unsigned long lock_flags = 0;
5142 int rc = SUCCESS;
1da177e4
LT
5143
5144 ENTER;
70233ac5 5145 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5146 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1da177e4 5147
96b04db9 5148 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
70233ac5 5149 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
a92fa25c
KSS
5150 dev_err(&ioa_cfg->pdev->dev,
5151 "Adapter being reset as a result of error recovery.\n");
1da177e4 5152
a92fa25c
KSS
5153 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5154 ioa_cfg->sdt_state = GET_DUMP;
5155 }
1da177e4 5156
70233ac5 5157 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5158 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5159 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
df0ae249 5160
70233ac5 5161 /* If we got hit with a host reset while we were already resetting
5162	   the adapter for some reason, and that reset failed, fail this host reset too. */
5163 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5164 ipr_trace;
5165 rc = FAILED;
5166 }
df0ae249 5167
70233ac5 5168 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5169 LEAVE;
df0ae249
JG
5170 return rc;
5171}
5172
c6513096
BK
5173/**
5174 * ipr_device_reset - Reset the device
5175 * @ioa_cfg: ioa config struct
5176 * @res: resource entry struct
5177 *
5178 * This function issues a device reset to the affected device.
5179 * If the device is a SCSI device, a LUN reset will be sent
5180 * to the device first. If that does not work, a target reset
35a39691
BK
5181 * will be sent. If the device is a SATA device, a PHY reset will
5182 * be sent.
c6513096
BK
5183 *
5184 * Return value:
5185 * 0 on success / non-zero on failure
5186 **/
5187static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5188 struct ipr_resource_entry *res)
5189{
5190 struct ipr_cmnd *ipr_cmd;
5191 struct ipr_ioarcb *ioarcb;
5192 struct ipr_cmd_pkt *cmd_pkt;
35a39691 5193 struct ipr_ioarcb_ata_regs *regs;
c6513096
BK
5194 u32 ioasc;
5195
5196 ENTER;
5197 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5198 ioarcb = &ipr_cmd->ioarcb;
5199 cmd_pkt = &ioarcb->cmd_pkt;
a32c055f
WB
5200
5201 if (ipr_cmd->ioa_cfg->sis64) {
5202 regs = &ipr_cmd->i.ata_ioadl.regs;
5203 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5204 } else
5205 regs = &ioarcb->u.add_data.u.regs;
c6513096 5206
3e7ebdfa 5207 ioarcb->res_handle = res->res_handle;
c6513096
BK
5208 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5209 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
35a39691
BK
5210 if (ipr_is_gata(res)) {
5211 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
a32c055f 5212 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
35a39691
BK
5213 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5214 }
c6513096
BK
5215
5216 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
96d21f00 5217 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
05a6538a 5218 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
96d21f00
WB
5219 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5220 if (ipr_cmd->ioa_cfg->sis64)
5221 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5222 sizeof(struct ipr_ioasa_gata));
5223 else
5224 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5225 sizeof(struct ipr_ioasa_gata));
5226 }
c6513096
BK
5227
5228 LEAVE;
203fa3fe 5229 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
c6513096
BK
5230}
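/*
 * Typical use (see __ipr_eh_dev_reset() below): the reset is issued
 * with the host lock held and the result is mapped onto the midlayer's
 * convention, roughly
 *
 *	rc = ipr_device_reset(ioa_cfg, res);
 *	return rc ? FAILED : SUCCESS;
 */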
5231
35a39691
BK
5232/**
5233 * ipr_sata_reset - Reset the SATA port
cc0680a5 5234 * @link: SATA link to reset
35a39691
BK
5235 * @classes: class of the attached device
 * @deadline: unused
5236 *
cc0680a5 5237 * This function issues a SATA phy reset to the affected ATA link.
35a39691
BK
5238 *
5239 * Return value:
5240 * 0 on success / non-zero on failure
5241 **/
cc0680a5 5242static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
120bda35 5243 unsigned long deadline)
35a39691 5244{
cc0680a5 5245 struct ipr_sata_port *sata_port = link->ap->private_data;
35a39691
BK
5246 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5247 struct ipr_resource_entry *res;
5248 unsigned long lock_flags = 0;
ef97d8ae 5249 int rc = -ENXIO, ret;
35a39691
BK
5250
5251 ENTER;
5252 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 5253 while (ioa_cfg->in_reset_reload) {
73d98ff0
BK
5254 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5255 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5256 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5257 }
5258
35a39691
BK
5259 res = sata_port->res;
5260 if (res) {
5261 rc = ipr_device_reset(ioa_cfg, res);
3e7ebdfa 5262 *classes = res->ata_class;
ef97d8ae
BK
5263 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5264
5265 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5266 if (ret != SUCCESS) {
5267 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5268 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5269 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5270
5271 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5272 }
5273 } else
5274 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
35a39691 5275
35a39691
BK
5276 LEAVE;
5277 return rc;
5278}
5279
1da177e4
LT
5280/**
5281 * __ipr_eh_dev_reset - Reset the device
5282 * @scsi_cmd: scsi command struct
5283 *
5284 * This function issues a device reset to the affected device.
5285 * A LUN reset will be sent to the device first. If that does
5286 * not work, a target reset will be sent.
5287 *
5288 * Return value:
5289 * SUCCESS / FAILED
5290 **/
203fa3fe 5291static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
1da177e4
LT
5292{
5293 struct ipr_cmnd *ipr_cmd;
5294 struct ipr_ioa_cfg *ioa_cfg;
5295 struct ipr_resource_entry *res;
35a39691 5296 struct ata_port *ap;
439ae285 5297 int rc = 0, i;
05a6538a 5298 struct ipr_hrr_queue *hrrq;
1da177e4
LT
5299
5300 ENTER;
5301 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5302 res = scsi_cmd->device->hostdata;
5303
1da177e4
LT
5304 /*
5305 * If we are currently going through reset/reload, return failed. This will force the
5306 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5307 * reset to complete
5308 */
5309 if (ioa_cfg->in_reset_reload)
5310 return FAILED;
56d6aa33 5311 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
1da177e4
LT
5312 return FAILED;
5313
05a6538a 5314 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5315 spin_lock(&hrrq->_lock);
439ae285
BK
5316 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5317 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5318
05a6538a 5319 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
960e9648
BK
5320 if (!ipr_cmd->qc)
5321 continue;
439ae285
BK
5322 if (ipr_cmnd_is_free(ipr_cmd))
5323 continue;
960e9648
BK
5324
5325 ipr_cmd->done = ipr_sata_eh_done;
5326 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
05a6538a 5327 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5328 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5329 }
7402ecef 5330 }
1da177e4 5331 }
56d6aa33 5332 spin_unlock(&hrrq->_lock);
1da177e4 5333 }
1da177e4 5334 res->resetting_device = 1;
fb3ed3cb 5335 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
35a39691
BK
5336
5337 if (ipr_is_gata(res) && res->sata_port) {
5338 ap = res->sata_port->ap;
5339 spin_unlock_irq(scsi_cmd->device->host->host_lock);
a1efdaba 5340 ata_std_error_handler(ap);
35a39691
BK
5341 spin_lock_irq(scsi_cmd->device->host->host_lock);
5342 } else
5343 rc = ipr_device_reset(ioa_cfg, res);
1da177e4 5344 res->resetting_device = 0;
0b1f8d44 5345 res->reset_occurred = 1;
1da177e4 5346
1da177e4 5347 LEAVE;
203fa3fe 5348 return rc ? FAILED : SUCCESS;
1da177e4
LT
5349}
5350
203fa3fe 5351static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
94d0e7b8
JG
5352{
5353 int rc;
6cdb0817 5354 struct ipr_ioa_cfg *ioa_cfg;
ef97d8ae 5355 struct ipr_resource_entry *res;
6cdb0817
BK
5356
5357 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
ef97d8ae
BK
5358 res = cmd->device->hostdata;
5359
5360 if (!res)
5361 return FAILED;
94d0e7b8
JG
5362
5363 spin_lock_irq(cmd->device->host->host_lock);
5364 rc = __ipr_eh_dev_reset(cmd);
5365 spin_unlock_irq(cmd->device->host->host_lock);
5366
ef97d8ae
BK
5367 if (rc == SUCCESS) {
5368 if (ipr_is_gata(res) && res->sata_port)
5369 rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5370 else
5371 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5372 }
6cdb0817 5373
94d0e7b8
JG
5374 return rc;
5375}
5376
1da177e4
LT
5377/**
5378 * ipr_bus_reset_done - Op done function for bus reset.
5379 * @ipr_cmd: ipr command struct
5380 *
5381 * This function is the op done function for a bus reset
5382 *
5383 * Return value:
5384 * none
5385 **/
5386static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5387{
5388 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5389 struct ipr_resource_entry *res;
5390
5391 ENTER;
3e7ebdfa
WB
5392 if (!ioa_cfg->sis64)
5393 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5394 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5395 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5396 break;
5397 }
1da177e4 5398 }
1da177e4
LT
5399
5400 /*
5401 * If abort has not completed, indicate the reset has, else call the
5402 * abort's done function to wake the sleeping eh thread
5403 */
5404 if (ipr_cmd->sibling->sibling)
5405 ipr_cmd->sibling->sibling = NULL;
5406 else
5407 ipr_cmd->sibling->done(ipr_cmd->sibling);
5408
05a6538a 5409 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
5410 LEAVE;
5411}
5412
5413/**
5414 * ipr_abort_timeout - An abort task has timed out
5415 * @t: Timer context used to fetch ipr command struct
5416 *
5417 * This function handles when an abort task times out. If this
5418 * happens we issue a bus reset since we have resources tied
5419 * up that must be freed before returning to the midlayer.
5420 *
5421 * Return value:
5422 * none
5423 **/
738c6ec5 5424static void ipr_abort_timeout(struct timer_list *t)
1da177e4 5425{
738c6ec5 5426 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
1da177e4
LT
5427 struct ipr_cmnd *reset_cmd;
5428 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5429 struct ipr_cmd_pkt *cmd_pkt;
5430 unsigned long lock_flags = 0;
5431
5432 ENTER;
5433 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5434 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5435 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5436 return;
5437 }
5438
fb3ed3cb 5439 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
1da177e4
LT
5440 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5441 ipr_cmd->sibling = reset_cmd;
5442 reset_cmd->sibling = ipr_cmd;
5443 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5444 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5445 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5446 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5447 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5448
5449 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5450 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5451 LEAVE;
5452}
5453
5454/**
5455 * ipr_cancel_op - Cancel specified op
5456 * @scsi_cmd: scsi command struct
5457 *
5458 * This function cancels specified op.
5459 *
5460 * Return value:
5461 * SUCCESS / FAILED
5462 **/
203fa3fe 5463static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
1da177e4
LT
5464{
5465 struct ipr_cmnd *ipr_cmd;
5466 struct ipr_ioa_cfg *ioa_cfg;
5467 struct ipr_resource_entry *res;
5468 struct ipr_cmd_pkt *cmd_pkt;
a92fa25c 5469 u32 ioasc, int_reg;
439ae285 5470 int i, op_found = 0;
05a6538a 5471 struct ipr_hrr_queue *hrrq;
1da177e4
LT
5472
5473 ENTER;
5474 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5475 res = scsi_cmd->device->hostdata;
5476
8fa728a2
JG
5477 /* If we are currently going through reset/reload, return failed.
5478 * This will force the mid-layer to call ipr_eh_host_reset,
5479 * which will then go to sleep and wait for the reset to complete
5480 */
56d6aa33 5481 if (ioa_cfg->in_reset_reload ||
5482 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8fa728a2 5483 return FAILED;
a92fa25c
KSS
5484 if (!res)
5485 return FAILED;
5486
5487 /*
5488 * If we are aborting a timed out op, chances are that the timeout was caused
5489 * by an EEH error that has not yet been detected. In such cases, reading
5490 * a register will trigger the EEH recovery infrastructure.
5491 */
5492 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5493
5494 if (!ipr_is_gscsi(res))
1da177e4
LT
5495 return FAILED;
5496
05a6538a 5497 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5498 spin_lock(&hrrq->_lock);
439ae285
BK
5499 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5500 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5501 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5502 op_found = 1;
5503 break;
5504 }
05a6538a 5505 }
1da177e4 5506 }
56d6aa33 5507 spin_unlock(&hrrq->_lock);
1da177e4
LT
5508 }
5509
5510 if (!op_found)
5511 return SUCCESS;
5512
5513 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3e7ebdfa 5514 ipr_cmd->ioarcb.res_handle = res->res_handle;
1da177e4
LT
5515 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5516 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5517 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5518 ipr_cmd->u.sdev = scsi_cmd->device;
5519
fb3ed3cb
BK
5520 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5521 scsi_cmd->cmnd[0]);
1da177e4 5522 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
96d21f00 5523 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5524
5525 /*
5526 * If the abort task timed out and we sent a bus reset, we will get
5527 * one the following responses to the abort
5528 */
5529 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5530 ioasc = 0;
5531 ipr_trace;
5532 }
5533
c4ee22a3 5534 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ee0a90fa 5535 if (!ipr_is_naca_model(res))
5536 res->needs_sync_complete = 1;
1da177e4
LT
5537
5538 LEAVE;
203fa3fe 5539 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
1da177e4
LT
5540}
5541
5542/**
5543 * ipr_scan_finished - Report whether the device scan has completed
5544 * @shost:		scsi host struct
 * @elapsed_time:	elapsed time of the scan in jiffies
5545 *
5546 * Return value:
f688f96d
BK
5547 * 0 if scan in progress / 1 if scan is complete
5548 **/
5549static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5550{
5551 unsigned long lock_flags;
5552 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5553 int rc = 0;
5554
5555 spin_lock_irqsave(shost->host_lock, lock_flags);
5556 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5557 rc = 1;
5558 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5559 rc = 1;
5560 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5561 return rc;
5562}
5563
5564/**
5565 * ipr_eh_abort - Abort a single op
5566 * @scsi_cmd: scsi command struct
5567 *
5568 * Return value:
1da177e4
LT
5569 * SUCCESS / FAILED
5570 **/
203fa3fe 5571static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
1da177e4 5572{
8fa728a2
JG
5573 unsigned long flags;
5574 int rc;
6cdb0817 5575 struct ipr_ioa_cfg *ioa_cfg;
1da177e4
LT
5576
5577 ENTER;
1da177e4 5578
6cdb0817
BK
5579 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5580
8fa728a2
JG
5581 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5582 rc = ipr_cancel_op(scsi_cmd);
5583 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
1da177e4 5584
6cdb0817
BK
5585 if (rc == SUCCESS)
5586 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
1da177e4 5587 LEAVE;
8fa728a2 5588 return rc;
1da177e4
LT
5589}
5590
5591/**
5592 * ipr_handle_other_interrupt - Handle "other" interrupts
5593 * @ioa_cfg: ioa config struct
634651fa 5594 * @int_reg: interrupt register
1da177e4
LT
5595 *
5596 * Return value:
5597 * IRQ_NONE / IRQ_HANDLED
5598 **/
634651fa 5599static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
630ad831 5600 u32 int_reg)
1da177e4
LT
5601{
5602 irqreturn_t rc = IRQ_HANDLED;
7dacb64f 5603 u32 int_mask_reg;
56d6aa33 5604
7dacb64f
WB
5605 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5606 int_reg &= ~int_mask_reg;
5607
5608 /* If an interrupt on the adapter did not occur, ignore it.
5609 * Or in the case of SIS 64, check for a stage change interrupt.
5610 */
5611 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5612 if (ioa_cfg->sis64) {
5613 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5614 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5615 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5616
5617 /* clear stage change */
5618 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5619 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5620 list_del(&ioa_cfg->reset_cmd->queue);
5621 del_timer(&ioa_cfg->reset_cmd->timer);
5622 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5623 return IRQ_HANDLED;
5624 }
5625 }
5626
5627 return IRQ_NONE;
5628 }
1da177e4
LT
5629
5630 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5631 /* Mask the interrupt */
5632 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
1da177e4
LT
5633 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5634
5635 list_del(&ioa_cfg->reset_cmd->queue);
5636 del_timer(&ioa_cfg->reset_cmd->timer);
5637 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
7dacb64f 5638 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
7dd21308
BK
5639 if (ioa_cfg->clear_isr) {
5640 if (ipr_debug && printk_ratelimit())
5641 dev_err(&ioa_cfg->pdev->dev,
5642 "Spurious interrupt detected. 0x%08X\n", int_reg);
5643 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5644 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5645 return IRQ_NONE;
5646 }
1da177e4
LT
5647 } else {
5648 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5649 ioa_cfg->ioa_unit_checked = 1;
05a6538a 5650 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5651 dev_err(&ioa_cfg->pdev->dev,
5652 "No Host RRQ. 0x%08X\n", int_reg);
1da177e4
LT
5653 else
5654 dev_err(&ioa_cfg->pdev->dev,
5655 "Permanent IOA failure. 0x%08X\n", int_reg);
5656
5657 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5658 ioa_cfg->sdt_state = GET_DUMP;
5659
5660 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5661 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5662 }
56d6aa33 5663
1da177e4
LT
5664 return rc;
5665}
5666
3feeb89d
WB
5667/**
5668 * ipr_isr_eh - Interrupt service routine error handler
5669 * @ioa_cfg: ioa config struct
5670 * @msg:	message to log
 * @number:	number logged with the message (command index or retry count)
5671 *
5672 * Return value:
5673 * none
5674 **/
05a6538a 5675static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
3feeb89d
WB
5676{
5677 ioa_cfg->errors_logged++;
05a6538a 5678 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
3feeb89d
WB
5679
5680 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5681 ioa_cfg->sdt_state = GET_DUMP;
5682
5683 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5684}
5685
b53d124a 5686static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
05a6538a 5687 struct list_head *doneq)
5688{
5689 u32 ioasc;
5690 u16 cmd_index;
5691 struct ipr_cmnd *ipr_cmd;
5692 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5693 int num_hrrq = 0;
5694
5695 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5696 if (!hrr_queue->allow_interrupts)
05a6538a 5697 return 0;
5698
5699 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5700 hrr_queue->toggle_bit) {
5701
5702 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5703 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5704 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5705
5706 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5707 cmd_index < hrr_queue->min_cmd_id)) {
5708 ipr_isr_eh(ioa_cfg,
5709 "Invalid response handle from IOA: ",
5710 cmd_index);
5711 break;
5712 }
5713
5714 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5715 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5716
5717 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5718
5719 list_move_tail(&ipr_cmd->queue, doneq);
5720
5721 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5722 hrr_queue->hrrq_curr++;
5723 } else {
5724 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5725 hrr_queue->toggle_bit ^= 1u;
5726 }
5727 num_hrrq++;
b53d124a 5728 if (budget > 0 && num_hrrq >= budget)
5729 break;
05a6538a 5730 }
b53d124a 5731
05a6538a 5732 return num_hrrq;
5733}
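/*
 * Note on the consumer protocol above: each host RRQ entry is a 32-bit
 * word holding a response handle plus IPR_HRRQ_TOGGLE_BIT.  The adapter
 * inverts the toggle bit it writes each time it wraps the circular
 * queue, so an entry is new only while its toggle bit equals
 * hrrq->toggle_bit; when the consumer wraps from hrrq_end back to
 * hrrq_start it flips its expected value (toggle_bit ^= 1u) to stay in
 * step.  A budget of -1, used by the hard-irq path, means "drain
 * everything available".
 */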
b53d124a 5734
511cbce2 5735static int ipr_iopoll(struct irq_poll *iop, int budget)
b53d124a 5736{
5737 struct ipr_ioa_cfg *ioa_cfg;
5738 struct ipr_hrr_queue *hrrq;
5739 struct ipr_cmnd *ipr_cmd, *temp;
5740 unsigned long hrrq_flags;
5741 int completed_ops;
5742 LIST_HEAD(doneq);
5743
5744 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5745 ioa_cfg = hrrq->ioa_cfg;
5746
5747 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5748 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5749
5750 if (completed_ops < budget)
511cbce2 5751 irq_poll_complete(iop);
b53d124a 5752 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5753
5754 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5755 list_del(&ipr_cmd->queue);
5756 del_timer(&ipr_cmd->timer);
5757 ipr_cmd->fast_done(ipr_cmd);
5758 }
5759
5760 return completed_ops;
5761}
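/*
 * Sketch (hedged) of how this poll handler plugs into the irq_poll
 * framework: each HRR queue is registered elsewhere in the driver,
 * roughly as
 *
 *	irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
 *		      ioa_cfg->iopoll_weight, ipr_iopoll);
 *
 * The interrupt handler then defers completion processing with
 * irq_poll_sched(&hrrq->iopoll), as ipr_isr_mhrrq() does below; when
 * ipr_iopoll() retires fewer ops than its budget it calls
 * irq_poll_complete() to re-arm interrupt-driven operation.
 */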
5762
1da177e4
LT
5763/**
5764 * ipr_isr - Interrupt service routine
5765 * @irq: irq number
5766 * @devp: pointer to the HRR queue for this interrupt vector
1da177e4
LT
5767 *
5768 * Return value:
5769 * IRQ_NONE / IRQ_HANDLED
5770 **/
7d12e780 5771static irqreturn_t ipr_isr(int irq, void *devp)
1da177e4 5772{
05a6538a 5773 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5774 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
56d6aa33 5775 unsigned long hrrq_flags = 0;
7dacb64f 5776 u32 int_reg = 0;
3feeb89d 5777 int num_hrrq = 0;
7dacb64f 5778 int irq_none = 0;
172cd6e1 5779 struct ipr_cmnd *ipr_cmd, *temp;
1da177e4 5780 irqreturn_t rc = IRQ_NONE;
172cd6e1 5781 LIST_HEAD(doneq);
1da177e4 5782
56d6aa33 5783 spin_lock_irqsave(hrrq->lock, hrrq_flags);
1da177e4 5784 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5785 if (!hrrq->allow_interrupts) {
5786 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4
LT
5787 return IRQ_NONE;
5788 }
5789
1da177e4 5790 while (1) {
b53d124a 5791 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5792 rc = IRQ_HANDLED;
1da177e4 5793
b53d124a 5794 if (!ioa_cfg->clear_isr)
5795 break;
7dd21308 5796
1da177e4 5797 /* Clear the PCI interrupt */
a5442ba4 5798 num_hrrq = 0;
3feeb89d 5799 do {
b53d124a 5800 writel(IPR_PCII_HRRQ_UPDATED,
5801 ioa_cfg->regs.clr_interrupt_reg32);
7dacb64f 5802 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
3feeb89d 5803 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
b53d124a 5804 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
3feeb89d 5805
7dacb64f
WB
5806 } else if (rc == IRQ_NONE && irq_none == 0) {
5807 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5808 irq_none++;
a5442ba4
WB
5809 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5810 int_reg & IPR_PCII_HRRQ_UPDATED) {
b53d124a 5811 ipr_isr_eh(ioa_cfg,
5812 "Error clearing HRRQ: ", num_hrrq);
172cd6e1 5813 rc = IRQ_HANDLED;
b53d124a 5814 break;
1da177e4
LT
5815 } else
5816 break;
5817 }
5818
5819 if (unlikely(rc == IRQ_NONE))
634651fa 5820 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
1da177e4 5821
56d6aa33 5822 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
172cd6e1
BK
5823 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5824 list_del(&ipr_cmd->queue);
5825 del_timer(&ipr_cmd->timer);
5826 ipr_cmd->fast_done(ipr_cmd);
5827 }
05a6538a 5828 return rc;
5829}
5830
5831/**
5832 * ipr_isr_mhrrq - Interrupt service routine for secondary HRR queues
5833 * @irq: irq number
5834 * @devp: pointer to the HRR queue for this interrupt vector
5835 *
5836 * Return value:
5837 * IRQ_NONE / IRQ_HANDLED
5838 **/
5839static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5840{
5841 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
b53d124a 5842 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
56d6aa33 5843 unsigned long hrrq_flags = 0;
05a6538a 5844 struct ipr_cmnd *ipr_cmd, *temp;
5845 irqreturn_t rc = IRQ_NONE;
5846 LIST_HEAD(doneq);
172cd6e1 5847
56d6aa33 5848 spin_lock_irqsave(hrrq->lock, hrrq_flags);
05a6538a 5849
5850 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5851 if (!hrrq->allow_interrupts) {
5852 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5853 return IRQ_NONE;
5854 }
5855
89f8b33c 5856 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 5857 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5858 hrrq->toggle_bit) {
ea51190c 5859 irq_poll_sched(&hrrq->iopoll);
b53d124a 5860 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5861 return IRQ_HANDLED;
5862 }
5863 } else {
5864 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5865 hrrq->toggle_bit)
05a6538a 5866
b53d124a 5867 if (ipr_process_hrrq(hrrq, -1, &doneq))
5868 rc = IRQ_HANDLED;
5869 }
05a6538a 5870
56d6aa33 5871 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5872
5873 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5874 list_del(&ipr_cmd->queue);
5875 del_timer(&ipr_cmd->timer);
5876 ipr_cmd->fast_done(ipr_cmd);
5877 }
1da177e4
LT
5878 return rc;
5879}
5880
a32c055f
WB
5881/**
5882 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5883 * @ioa_cfg: ioa config struct
5884 * @ipr_cmd: ipr command struct
5885 *
5886 * Return value:
5887 * 0 on success / -1 on failure
5888 **/
5889static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5890 struct ipr_cmnd *ipr_cmd)
5891{
5892 int i, nseg;
5893 struct scatterlist *sg;
5894 u32 length;
5895 u32 ioadl_flags = 0;
5896 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5897 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5898 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5899
5900 length = scsi_bufflen(scsi_cmd);
5901 if (!length)
5902 return 0;
5903
5904 nseg = scsi_dma_map(scsi_cmd);
5905 if (nseg < 0) {
51f52a47 5906 if (printk_ratelimit())
d73341bf 5907 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
a32c055f
WB
5908 return -1;
5909 }
5910
5911 ipr_cmd->dma_use_sg = nseg;
5912
438b0331 5913 ioarcb->data_transfer_length = cpu_to_be32(length);
b8803b1c
WB
5914 ioarcb->ioadl_len =
5915 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
438b0331 5916
a32c055f
WB
5917 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5918 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5919 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5920 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5921 ioadl_flags = IPR_IOADL_FLAGS_READ;
5922
5923 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5924 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5925 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5926 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5927 }
5928
5929 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5930 return 0;
5931}
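/*
 * Worked example with hypothetical values: a three-element scatterlist
 * for a write maps to three big-endian IOADL64 descriptors,
 *
 *	ioadl64[0] = { WRITE,        .data_len = 4096, .address = A0 }
 *	ioadl64[1] = { WRITE,        .data_len = 4096, .address = A1 }
 *	ioadl64[2] = { WRITE | LAST, .data_len = 2048, .address = A2 }
 *
 * where WRITE/LAST stand for IPR_IOADL_FLAGS_WRITE/IPR_IOADL_FLAGS_LAST
 * and A0..A2 come from sg_dma_address().  data_transfer_length in the
 * IOARCB covers the sum (10240 bytes here) and ioadl_len the size of
 * the descriptor list itself.
 */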
5932
1da177e4
LT
5933/**
5934 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5935 * @ioa_cfg: ioa config struct
5936 * @ipr_cmd: ipr command struct
5937 *
5938 * Return value:
5939 * 0 on success / -1 on failure
5940 **/
5941static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5942 struct ipr_cmnd *ipr_cmd)
5943{
63015bc9
FT
5944 int i, nseg;
5945 struct scatterlist *sg;
1da177e4
LT
5946 u32 length;
5947 u32 ioadl_flags = 0;
5948 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5949 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5950 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4 5951
63015bc9
FT
5952 length = scsi_bufflen(scsi_cmd);
5953 if (!length)
1da177e4
LT
5954 return 0;
5955
63015bc9
FT
5956 nseg = scsi_dma_map(scsi_cmd);
5957 if (nseg < 0) {
d73341bf 5958 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
63015bc9
FT
5959 return -1;
5960 }
51b1c7e1 5961
63015bc9
FT
5962 ipr_cmd->dma_use_sg = nseg;
5963
5964 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5965 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5966 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
5967 ioarcb->data_transfer_length = cpu_to_be32(length);
5968 ioarcb->ioadl_len =
63015bc9
FT
5969 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5970 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5971 ioadl_flags = IPR_IOADL_FLAGS_READ;
5972 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5973 ioarcb->read_ioadl_len =
5974 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5975 }
1da177e4 5976
a32c055f
WB
5977 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5978 ioadl = ioarcb->u.add_data.u.ioadl;
5979 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5980 offsetof(struct ipr_ioarcb, u.add_data));
63015bc9
FT
5981 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5982 }
1da177e4 5983
63015bc9
FT
5984 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5985 ioadl[i].flags_and_data_len =
5986 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5987 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
1da177e4
LT
5988 }
5989
63015bc9
FT
5990 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5991 return 0;
1da177e4
LT
5992}
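/*
 * Design note: in the 32-bit format above, an S/G list short enough to
 * fit in ioarcb->u.add_data.u.ioadl is embedded in the IOARCB itself,
 * saving the adapter a separate DMA fetch of the descriptor list;
 * longer lists stay in the command block and are reached through
 * write_ioadl_addr/read_ioadl_addr.
 */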
5993
1da177e4 5994/**
f646f325 5995 * __ipr_erp_done - Process completion of ERP for a device
1da177e4
LT
5996 * @ipr_cmd: ipr command struct
5997 *
5998 * This function copies the sense buffer into the scsi_cmd
5999 * struct and pushes the scsi_done function.
6000 *
6001 * Return value:
6002 * nothing
6003 **/
f646f325 6004static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
6005{
6006 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6007 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 6008 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
6009
6010 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6011 scsi_cmd->result |= (DID_ERROR << 16);
fb3ed3cb
BK
6012 scmd_printk(KERN_ERR, scsi_cmd,
6013 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
1da177e4
LT
6014 } else {
6015 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6016 SCSI_SENSE_BUFFERSIZE);
6017 }
6018
6019 if (res) {
ee0a90fa 6020 if (!ipr_is_naca_model(res))
6021 res->needs_sync_complete = 1;
1da177e4
LT
6022 res->in_erp = 0;
6023 }
63015bc9 6024 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4 6025 scsi_cmd->scsi_done(scsi_cmd);
66a0d59c
BK
6026 if (ipr_cmd->eh_comp)
6027 complete(ipr_cmd->eh_comp);
6028 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
6029}
6030
f646f325
BK
6031/**
6032 * ipr_erp_done - Process completion of ERP for a device
6033 * @ipr_cmd: ipr command struct
6034 *
6035 * This function copies the sense buffer into the scsi_cmd
6036 * struct and pushes the scsi_done function.
6037 *
6038 * Return value:
6039 * nothing
6040 **/
6041static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6042{
6043 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6044 unsigned long hrrq_flags;
6045
6046 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6047 __ipr_erp_done(ipr_cmd);
6048 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
1da177e4
LT
6049}
6050
6051/**
6052 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6053 * @ipr_cmd: ipr command struct
6054 *
6055 * Return value:
6056 * none
6057 **/
6058static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6059{
51b1c7e1 6060 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
96d21f00 6061 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
a32c055f 6062 dma_addr_t dma_addr = ipr_cmd->dma_addr;
1da177e4
LT
6063
6064 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 6065 ioarcb->data_transfer_length = 0;
1da177e4 6066 ioarcb->read_data_transfer_length = 0;
a32c055f 6067 ioarcb->ioadl_len = 0;
1da177e4 6068 ioarcb->read_ioadl_len = 0;
96d21f00
WB
6069 ioasa->hdr.ioasc = 0;
6070 ioasa->hdr.residual_data_len = 0;
a32c055f
WB
6071
6072 if (ipr_cmd->ioa_cfg->sis64)
6073 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6074 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6075 else {
6076 ioarcb->write_ioadl_addr =
6077 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6078 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6079 }
1da177e4
LT
6080}
6081
6082/**
f646f325 6083 * __ipr_erp_request_sense - Send request sense to a device
1da177e4
LT
6084 * @ipr_cmd: ipr command struct
6085 *
6086 * This function sends a request sense to a device as a result
6087 * of a check condition.
6088 *
6089 * Return value:
6090 * nothing
6091 **/
f646f325 6092static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
6093{
6094 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
96d21f00 6095 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
6096
6097 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
f646f325 6098 __ipr_erp_done(ipr_cmd);
1da177e4
LT
6099 return;
6100 }
6101
6102 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6103
6104 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6105 cmd_pkt->cdb[0] = REQUEST_SENSE;
6106 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6107 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6108 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6109 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6110
a32c055f
WB
6111 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6112 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
6113
6114 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6115 IPR_REQUEST_SENSE_TIMEOUT * 2);
6116}
6117
f646f325
BK
6118/**
6119 * ipr_erp_request_sense - Send request sense to a device
6120 * @ipr_cmd: ipr command struct
6121 *
6122 * This function sends a request sense to a device as a result
6123 * of a check condition.
6124 *
6125 * Return value:
6126 * nothing
6127 **/
6128static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6129{
6130 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6131 unsigned long hrrq_flags;
6132
6133 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6134 __ipr_erp_request_sense(ipr_cmd);
6135 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6136}
6137
1da177e4
LT
6138/**
6139 * ipr_erp_cancel_all - Send cancel all to a device
6140 * @ipr_cmd: ipr command struct
6141 *
6142 * This function sends a cancel all to a device to clear the
6143 * queue. If we are running TCQ on the device, QERR is set to 1,
6144 * which means all outstanding ops have been dropped on the floor.
6145 * Cancel all will return them to us.
6146 *
6147 * Return value:
6148 * nothing
6149 **/
6150static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6151{
6152 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6153 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6154 struct ipr_cmd_pkt *cmd_pkt;
6155
6156 res->in_erp = 1;
6157
6158 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6159
17ea0126 6160 if (!scsi_cmd->device->simple_tags) {
f646f325 6161 __ipr_erp_request_sense(ipr_cmd);
1da177e4
LT
6162 return;
6163 }
6164
6165 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6166 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6167 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6168
6169 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6170 IPR_CANCEL_ALL_TIMEOUT);
6171}
6172
6173/**
6174 * ipr_dump_ioasa - Dump contents of IOASA
6175 * @ioa_cfg: ioa config struct
6176 * @ipr_cmd: ipr command struct
fe964d0a 6177 * @res: resource entry struct
1da177e4
LT
6178 *
6179 * This function is invoked by the interrupt handler when ops
6180 * fail. It will log the IOASA if appropriate. Only called
6181 * for GPDD ops.
6182 *
6183 * Return value:
6184 * none
6185 **/
6186static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 6187 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
1da177e4
LT
6188{
6189 int i;
6190 u16 data_len;
b0692dd4 6191 u32 ioasc, fd_ioasc;
96d21f00 6192 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
1da177e4
LT
6193 __be32 *ioasa_data = (__be32 *)ioasa;
6194 int error_index;
6195
96d21f00
WB
6196 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6197 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
1da177e4
LT
6198
6199 if (0 == ioasc)
6200 return;
6201
6202 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6203 return;
6204
b0692dd4
BK
6205 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6206 error_index = ipr_get_error(fd_ioasc);
6207 else
6208 error_index = ipr_get_error(ioasc);
1da177e4
LT
6209
6210 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6211 /* Don't log an error if the IOA already logged one */
96d21f00 6212 if (ioasa->hdr.ilid != 0)
1da177e4
LT
6213 return;
6214
cc9bd5d4
BK
6215 if (!ipr_is_gscsi(res))
6216 return;
6217
1da177e4
LT
6218 if (ipr_error_table[error_index].log_ioasa == 0)
6219 return;
6220 }
6221
fe964d0a 6222 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4 6223
96d21f00
WB
6224 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6225 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6226 data_len = sizeof(struct ipr_ioasa64);
6227 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
1da177e4 6228 data_len = sizeof(struct ipr_ioasa);
1da177e4
LT
6229
6230 ipr_err("IOASA Dump:\n");
6231
6232 for (i = 0; i < data_len / 4; i += 4) {
6233 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6234 be32_to_cpu(ioasa_data[i]),
6235 be32_to_cpu(ioasa_data[i+1]),
6236 be32_to_cpu(ioasa_data[i+2]),
6237 be32_to_cpu(ioasa_data[i+3]));
6238 }
6239}
6240
6241/**
6242 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6243 * @ipr_cmd:	ipr command struct
6245 *
6246 * Return value:
6247 * none
6248 **/
6249static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6250{
6251 u32 failing_lba;
6252 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6253 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
96d21f00
WB
6254 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6255 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
1da177e4
LT
6256
6257 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6258
6259 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6260 return;
6261
6262 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6263
6264 if (ipr_is_vset_device(res) &&
6265 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6266 ioasa->u.vset.failing_lba_hi != 0) {
6267 sense_buf[0] = 0x72;
6268 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6269 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6270 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6271
6272 sense_buf[7] = 12;
6273 sense_buf[8] = 0;
6274 sense_buf[9] = 0x0A;
6275 sense_buf[10] = 0x80;
6276
6277 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6278
6279 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6280 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6281 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6282 sense_buf[15] = failing_lba & 0x000000ff;
6283
6284 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6285
6286 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6287 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6288 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6289 sense_buf[19] = failing_lba & 0x000000ff;
6290 } else {
6291 sense_buf[0] = 0x70;
6292 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6293 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6294 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6295
6296 /* Illegal request */
6297 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
96d21f00 6298 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
1da177e4
LT
6299 sense_buf[7] = 10; /* additional length */
6300
6301 /* IOARCB was in error */
6302 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6303 sense_buf[15] = 0xC0;
6304 else /* Parameter data was invalid */
6305 sense_buf[15] = 0x80;
6306
6307 sense_buf[16] =
6308 ((IPR_FIELD_POINTER_MASK &
96d21f00 6309 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
1da177e4
LT
6310 sense_buf[17] =
6311 (IPR_FIELD_POINTER_MASK &
96d21f00 6312 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
1da177e4
LT
6313 } else {
6314 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6315 if (ipr_is_vset_device(res))
6316 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6317 else
6318 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6319
6320 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6321 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6322 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6323 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6324 sense_buf[6] = failing_lba & 0x000000ff;
6325 }
6326
6327 sense_buf[7] = 6; /* additional length */
6328 }
6329 }
6330}
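/*
 * Note on the two layouts built above: response code 0x72 is
 * descriptor-format sense, used when a vset failing LBA needs more
 * than 32 bits; it carries an information descriptor (type 0x00,
 * additional length 0x0A, VALID bit 0x80) with the 64-bit failing LBA
 * in bytes 12-19.  Response code 0x70 is fixed-format sense: sense key
 * in byte 2, ASC/ASCQ in bytes 12-13, and, when byte 0's valid bit is
 * set, a 32-bit failing LBA in the information field (bytes 3-6).
 */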
6331
ee0a90fa 6332/**
6333 * ipr_get_autosense - Copy autosense data to sense buffer
6334 * @ipr_cmd: ipr command struct
6335 *
6336 * This function copies the autosense buffer to the buffer
6337 * in the scsi_cmd, if there is autosense available.
6338 *
6339 * Return value:
6340 * 1 if autosense was available / 0 if not
6341 **/
6342static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6343{
96d21f00
WB
6344 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6345 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
ee0a90fa 6346
96d21f00 6347 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
ee0a90fa 6348 return 0;
6349
96d21f00
WB
6350 if (ipr_cmd->ioa_cfg->sis64)
6351 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6352 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6353 SCSI_SENSE_BUFFERSIZE));
6354 else
6355 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6356 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6357 SCSI_SENSE_BUFFERSIZE));
ee0a90fa 6358 return 1;
6359}
6360
1da177e4
LT
6361/**
6362 * ipr_erp_start - Process an error response for a SCSI op
6363 * @ioa_cfg: ioa config struct
6364 * @ipr_cmd: ipr command struct
6365 *
6366 * This function determines whether or not to initiate ERP
6367 * on the affected device.
6368 *
6369 * Return value:
6370 * nothing
6371 **/
6372static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6373 struct ipr_cmnd *ipr_cmd)
6374{
6375 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6376 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 6377 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8a048994 6378 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
1da177e4
LT
6379
6380 if (!res) {
f646f325 6381 __ipr_scsi_eh_done(ipr_cmd);
1da177e4
LT
6382 return;
6383 }
6384
8a048994 6385 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
1da177e4
LT
6386 ipr_gen_sense(ipr_cmd);
6387
cc9bd5d4
BK
6388 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6389
8a048994 6390 switch (masked_ioasc) {
1da177e4 6391 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa 6392 if (ipr_is_naca_model(res))
6393 scsi_cmd->result |= (DID_ABORT << 16);
6394 else
6395 scsi_cmd->result |= (DID_IMM_RETRY << 16);
1da177e4
LT
6396 break;
6397 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 6398 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
1da177e4
LT
6399 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6400 break;
6401 case IPR_IOASC_HW_SEL_TIMEOUT:
6402 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa 6403 if (!ipr_is_naca_model(res))
6404 res->needs_sync_complete = 1;
1da177e4
LT
6405 break;
6406 case IPR_IOASC_SYNC_REQUIRED:
6407 if (!res->in_erp)
6408 res->needs_sync_complete = 1;
6409 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6410 break;
6411 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 6412 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
785a4704
MFO
6413 /*
6414 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6415 * so SCSI mid-layer and upper layers handle it accordingly.
6416 */
6417 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6418 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
1da177e4
LT
6419 break;
6420 case IPR_IOASC_BUS_WAS_RESET:
6421 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6422 /*
6423 * Report the bus reset and ask for a retry. The device
6424 * will give CC/UA the next command.
6425 */
6426 if (!res->resetting_device)
6427 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6428 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 6429 if (!ipr_is_naca_model(res))
6430 res->needs_sync_complete = 1;
1da177e4
LT
6431 break;
6432 case IPR_IOASC_HW_DEV_BUS_STATUS:
6433 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6434 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa 6435 if (!ipr_get_autosense(ipr_cmd)) {
6436 if (!ipr_is_naca_model(res)) {
6437 ipr_erp_cancel_all(ipr_cmd);
6438 return;
6439 }
6440 }
1da177e4 6441 }
ee0a90fa 6442 if (!ipr_is_naca_model(res))
6443 res->needs_sync_complete = 1;
1da177e4
LT
6444 break;
6445 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6446 break;
f8ee25d7
WX
6447 case IPR_IOASC_IR_NON_OPTIMIZED:
6448 if (res->raw_mode) {
6449 res->raw_mode = 0;
6450 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6451 } else
6452 scsi_cmd->result |= (DID_ERROR << 16);
6453 break;
1da177e4 6454 default:
5b7304fb
BK
6455 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6456 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 6457 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
1da177e4
LT
6458 res->needs_sync_complete = 1;
6459 break;
6460 }
6461
63015bc9 6462 scsi_dma_unmap(ipr_cmd->scsi_cmd);
1da177e4 6463 scsi_cmd->scsi_done(scsi_cmd);
66a0d59c
BK
6464 if (ipr_cmd->eh_comp)
6465 complete(ipr_cmd->eh_comp);
6466 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
6467}
6468
6469/**
6470 * ipr_scsi_done - mid-layer done function
6471 * @ipr_cmd: ipr command struct
6472 *
6473 * This function is invoked by the interrupt handler for
6474 * ops generated by the SCSI mid-layer
6475 *
6476 * Return value:
6477 * none
6478 **/
6479static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6480{
6481 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6482 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
96d21f00 6483 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
36b8e180 6484 unsigned long lock_flags;
1da177e4 6485
96d21f00 6486 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
1da177e4
LT
6487
6488 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
172cd6e1
BK
6489 scsi_dma_unmap(scsi_cmd);
6490
36b8e180 6491 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
1da177e4 6492 scsi_cmd->scsi_done(scsi_cmd);
66a0d59c
BK
6493 if (ipr_cmd->eh_comp)
6494 complete(ipr_cmd->eh_comp);
6495 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
36b8e180 6496 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
172cd6e1 6497 } else {
36b8e180
BK
6498 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6499 spin_lock(&ipr_cmd->hrrq->_lock);
1da177e4 6500 ipr_erp_start(ioa_cfg, ipr_cmd);
36b8e180
BK
6501 spin_unlock(&ipr_cmd->hrrq->_lock);
6502 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
172cd6e1 6503 }
1da177e4
LT
6504}
6505
1da177e4
LT
6506/**
6507 * ipr_queuecommand - Queue a mid-layer request
00bfef2c 6508 * @shost: scsi host struct
1da177e4 6509 * @scsi_cmd: scsi command struct
1da177e4
LT
6510 *
6511 * This function queues a request generated by the mid-layer.
6512 *
6513 * Return value:
6514 * 0 on success
6515 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6516 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6517 **/
00bfef2c
BK
6518static int ipr_queuecommand(struct Scsi_Host *shost,
6519 struct scsi_cmnd *scsi_cmd)
1da177e4
LT
6520{
6521 struct ipr_ioa_cfg *ioa_cfg;
6522 struct ipr_resource_entry *res;
6523 struct ipr_ioarcb *ioarcb;
6524 struct ipr_cmnd *ipr_cmd;
56d6aa33 6525 unsigned long hrrq_flags, lock_flags;
d12f1576 6526 int rc;
05a6538a 6527 struct ipr_hrr_queue *hrrq;
6528 int hrrq_id;
1da177e4 6529
00bfef2c
BK
6530 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6531
1da177e4 6532 scsi_cmd->result = (DID_OK << 16);
00bfef2c 6533 res = scsi_cmd->device->hostdata;
56d6aa33 6534
6535 if (ipr_is_gata(res) && res->sata_port) {
6536 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6537 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6538 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6539 return rc;
6540 }
6541
05a6538a 6542 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6543 hrrq = &ioa_cfg->hrrq[hrrq_id];
1da177e4 6544
56d6aa33 6545 spin_lock_irqsave(hrrq->lock, hrrq_flags);
1da177e4
LT
6546 /*
6547 * We are currently blocking all devices due to a host reset
6548 * We have told the host to stop giving us new requests, but
6549 * ERP ops don't count. FIXME
6550 */
bfae7820 6551 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
56d6aa33 6552 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6553 return SCSI_MLQUEUE_HOST_BUSY;
00bfef2c 6554 }
1da177e4
LT
6555
6556 /*
6557 * FIXME - Create scsi_set_host_offline interface
6558 * and the ioa_is_dead check can be removed
6559 */
bfae7820 6560 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
56d6aa33 6561 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c 6562 goto err_nodev;
1da177e4
LT
6563 }
6564
05a6538a 6565 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6566 if (ipr_cmd == NULL) {
56d6aa33 6567 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 6568 return SCSI_MLQUEUE_HOST_BUSY;
6569 }
56d6aa33 6570 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
35a39691 6571
172cd6e1 6572 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
1da177e4 6573 ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
6574
6575 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6576 ipr_cmd->scsi_cmd = scsi_cmd;
172cd6e1 6577 ipr_cmd->done = ipr_scsi_eh_done;
1da177e4 6578
4f92d01a 6579 if (ipr_is_gscsi(res)) {
1da177e4
LT
6580 if (scsi_cmd->underflow == 0)
6581 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6582
4f92d01a 6583 if (res->reset_occurred) {
0b1f8d44 6584 res->reset_occurred = 0;
ab6c10b1 6585 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
0b1f8d44 6586 }
4f92d01a
GKB
6587 }
6588
6589 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6590 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6591
1da177e4 6592 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
50668633
CH
6593 if (scsi_cmd->flags & SCMD_TAGGED)
6594 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6595 else
6596 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
1da177e4
LT
6597 }
6598
6599 if (scsi_cmd->cmnd[0] >= 0xC0 &&
05a6538a 6600 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
1da177e4 6601 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
05a6538a 6602 }
3cb4fc1f 6603 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
f8ee25d7 6604 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
1da177e4 6605
3cb4fc1f
GKB
6606 if (scsi_cmd->underflow == 0)
6607 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6608 }
6609
d12f1576
DC
6610 if (ioa_cfg->sis64)
6611 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6612 else
6613 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
1da177e4 6614
56d6aa33 6615 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6616 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
05a6538a 6617 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6618 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6619 if (!rc)
6620 scsi_dma_unmap(scsi_cmd);
a5fb407e 6621 return SCSI_MLQUEUE_HOST_BUSY;
1da177e4
LT
6622 }
6623
56d6aa33 6624 if (unlikely(hrrq->ioa_is_dead)) {
05a6538a 6625 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6626 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6627 scsi_dma_unmap(scsi_cmd);
6628 goto err_nodev;
6629 }
6630
6631 ioarcb->res_handle = res->res_handle;
6632 if (res->needs_sync_complete) {
6633 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6634 res->needs_sync_complete = 0;
6635 }
05a6538a 6636 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
00bfef2c 6637 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
a5fb407e 6638 ipr_send_command(ipr_cmd);
56d6aa33 6639 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6640 return 0;
1da177e4 6641
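	/*
	 * No backing device (or the IOA is gone): complete the command
	 * immediately with DID_NO_CONNECT, under the HRRQ lock, so the
	 * midlayer fails it up the stack rather than retrying.
	 */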
00bfef2c 6642err_nodev:
56d6aa33 6643 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6644 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6645 scsi_cmd->result = (DID_NO_CONNECT << 16);
6646 scsi_cmd->scsi_done(scsi_cmd);
56d6aa33 6647 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6648 return 0;
6649}
f281233d 6650
6651/**
6652 * ipr_ioctl - IOCTL handler
6653 * @sdev: scsi device struct
6654 * @cmd: IOCTL cmd
6655 * @arg: IOCTL arg
6656 *
6657 * Return value:
6658 * 0 on success / other on failure
6659 **/
bd705f2d 6660static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6661{
6662 struct ipr_resource_entry *res;
6663
6664 res = (struct ipr_resource_entry *)sdev->hostdata;
6665 if (res && ipr_is_gata(res)) {
6666 if (cmd == HDIO_GET_IDENTITY)
6667 return -ENOTTY;
94be9a58 6668 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
0ce3a7e5 6669 }
6670
6671 return -EINVAL;
6672}
6673
6674/**
 6675 * ipr_ioa_info - Get information about the card/driver
 6676 * @host: scsi host struct
6677 *
6678 * Return value:
6679 * pointer to buffer with description string
6680 **/
203fa3fe 6681static const char *ipr_ioa_info(struct Scsi_Host *host)
6682{
6683 static char buffer[512];
6684 struct ipr_ioa_cfg *ioa_cfg;
6685 unsigned long lock_flags = 0;
6686
6687 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6688
6689 spin_lock_irqsave(host->host_lock, lock_flags);
6690 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6691 spin_unlock_irqrestore(host->host_lock, lock_flags);
6692
6693 return buffer;
6694}
6695
6696static struct scsi_host_template driver_template = {
6697 .module = THIS_MODULE,
6698 .name = "IPR",
6699 .info = ipr_ioa_info,
35a39691 6700 .ioctl = ipr_ioctl,
6701 .queuecommand = ipr_queuecommand,
6702 .eh_abort_handler = ipr_eh_abort,
6703 .eh_device_reset_handler = ipr_eh_dev_reset,
6704 .eh_host_reset_handler = ipr_eh_host_reset,
6705 .slave_alloc = ipr_slave_alloc,
6706 .slave_configure = ipr_slave_configure,
6707 .slave_destroy = ipr_slave_destroy,
f688f96d 6708 .scan_finished = ipr_scan_finished,
6709 .target_alloc = ipr_target_alloc,
6710 .target_destroy = ipr_target_destroy,
1da177e4 6711 .change_queue_depth = ipr_change_queue_depth,
6712 .bios_param = ipr_biosparam,
6713 .can_queue = IPR_MAX_COMMANDS,
6714 .this_id = -1,
6715 .sg_tablesize = IPR_MAX_SGLIST,
6716 .max_sectors = IPR_IOA_MAX_SECTORS,
6717 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6718 .use_clustering = ENABLE_CLUSTERING,
6719 .shost_attrs = ipr_ioa_attrs,
6720 .sdev_attrs = ipr_dev_attrs,
54b2b50c 6721 .proc_name = IPR_NAME,
6722};
6723
6724/**
6725 * ipr_ata_phy_reset - libata phy_reset handler
6726 * @ap: ata port to reset
6727 *
6728 **/
6729static void ipr_ata_phy_reset(struct ata_port *ap)
6730{
6731 unsigned long flags;
6732 struct ipr_sata_port *sata_port = ap->private_data;
6733 struct ipr_resource_entry *res = sata_port->res;
6734 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6735 int rc;
6736
6737 ENTER;
6738 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6739 while (ioa_cfg->in_reset_reload) {
6740 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6741 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6742 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6743 }
6744
56d6aa33 6745 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6746 goto out_unlock;
6747
6748 rc = ipr_device_reset(ioa_cfg, res);
6749
6750 if (rc) {
3e4ec344 6751 ap->link.device[0].class = ATA_DEV_NONE;
6752 goto out_unlock;
6753 }
6754
6755 ap->link.device[0].class = res->ata_class;
6756 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
3e4ec344 6757 ap->link.device[0].class = ATA_DEV_NONE;
6758
6759out_unlock:
6760 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6761 LEAVE;
6762}
6763
6764/**
6765 * ipr_ata_post_internal - Cleanup after an internal command
6766 * @qc: ATA queued command
6767 *
6768 * Return value:
6769 * none
6770 **/
6771static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6772{
6773 struct ipr_sata_port *sata_port = qc->ap->private_data;
6774 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6775 struct ipr_cmnd *ipr_cmd;
05a6538a 6776 struct ipr_hrr_queue *hrrq;
6777 unsigned long flags;
6778
6779 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6780 while (ioa_cfg->in_reset_reload) {
6781 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6782 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6783 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6784 }
6785
05a6538a 6786 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 6787 spin_lock(&hrrq->_lock);
05a6538a 6788 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6789 if (ipr_cmd->qc == qc) {
6790 ipr_device_reset(ioa_cfg, sata_port->res);
6791 break;
6792 }
35a39691 6793 }
56d6aa33 6794 spin_unlock(&hrrq->_lock);
6795 }
6796 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6797}
6798
6799/**
6800 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6801 * @regs: destination
6802 * @tf: source ATA taskfile
6803 *
6804 * Return value:
6805 * none
6806 **/
6807static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6808 struct ata_taskfile *tf)
6809{
6810 regs->feature = tf->feature;
6811 regs->nsect = tf->nsect;
6812 regs->lbal = tf->lbal;
6813 regs->lbam = tf->lbam;
6814 regs->lbah = tf->lbah;
6815 regs->device = tf->device;
6816 regs->command = tf->command;
6817 regs->hob_feature = tf->hob_feature;
6818 regs->hob_nsect = tf->hob_nsect;
6819 regs->hob_lbal = tf->hob_lbal;
6820 regs->hob_lbam = tf->hob_lbam;
6821 regs->hob_lbah = tf->hob_lbah;
6822 regs->ctl = tf->ctl;
6823}
6824
6825/**
6826 * ipr_sata_done - done function for SATA commands
6827 * @ipr_cmd: ipr command struct
6828 *
6829 * This function is invoked by the interrupt handler for
6830 * ops generated by the SCSI mid-layer to SATA devices
6831 *
6832 * Return value:
6833 * none
6834 **/
6835static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6836{
6837 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6838 struct ata_queued_cmd *qc = ipr_cmd->qc;
6839 struct ipr_sata_port *sata_port = qc->ap->private_data;
6840 struct ipr_resource_entry *res = sata_port->res;
96d21f00 6841 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
35a39691 6842
56d6aa33 6843 spin_lock(&ipr_cmd->hrrq->_lock);
6844 if (ipr_cmd->ioa_cfg->sis64)
6845 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6846 sizeof(struct ipr_ioasa_gata));
6847 else
6848 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6849 sizeof(struct ipr_ioasa_gata));
6850 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6851
96d21f00 6852 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
3e7ebdfa 6853 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6854
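	/*
	 * A sense key worse than RECOVERED ERROR means the op failed even
	 * if the ATA status looks clean, so use __ac_err_mask(), which
	 * reports at least AC_ERR_OTHER; otherwise let ac_err_mask()
	 * derive the error mask from the ATA status byte alone.
	 */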
6855 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
96d21f00 6856 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
35a39691 6857 else
96d21f00 6858 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
05a6538a 6859 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
56d6aa33 6860 spin_unlock(&ipr_cmd->hrrq->_lock);
6861 ata_qc_complete(qc);
6862}
6863
6864/**
6865 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6866 * @ipr_cmd: ipr command struct
6867 * @qc: ATA queued command
6868 *
6869 **/
6870static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6871 struct ata_queued_cmd *qc)
6872{
6873 u32 ioadl_flags = 0;
6874 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1ac7c26d 6875 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6876 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6877 int len = qc->nbytes;
6878 struct scatterlist *sg;
6879 unsigned int si;
6880 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6881
6882 if (len == 0)
6883 return;
6884
6885 if (qc->dma_dir == DMA_TO_DEVICE) {
6886 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6887 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6888 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6889 ioadl_flags = IPR_IOADL_FLAGS_READ;
6890
6891 ioarcb->data_transfer_length = cpu_to_be32(len);
6892 ioarcb->ioadl_len =
6893 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6894 ioarcb->u.sis64_addr_data.data_ioadl_addr =
1ac7c26d 6895 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6896
6897 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6898 ioadl64->flags = cpu_to_be32(ioadl_flags);
6899 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6900 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6901
6902 last_ioadl64 = ioadl64;
6903 ioadl64++;
6904 }
6905
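	/*
	 * The adapter walks the IOADL until it sees a descriptor with the
	 * LAST flag set, so tag the final entry of the chain.
	 */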
6906 if (likely(last_ioadl64))
6907 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6908}
6909
6910/**
6911 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6912 * @ipr_cmd: ipr command struct
6913 * @qc: ATA queued command
6914 *
6915 **/
6916static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6917 struct ata_queued_cmd *qc)
6918{
6919 u32 ioadl_flags = 0;
6920 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 6921 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3be6cbd7 6922 struct ipr_ioadl_desc *last_ioadl = NULL;
dde20207 6923 int len = qc->nbytes;
35a39691 6924 struct scatterlist *sg;
ff2aeb1e 6925 unsigned int si;
6926
6927 if (len == 0)
6928 return;
6929
6930 if (qc->dma_dir == DMA_TO_DEVICE) {
6931 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6932 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6933 ioarcb->data_transfer_length = cpu_to_be32(len);
6934 ioarcb->ioadl_len =
6935 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6936 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6937 ioadl_flags = IPR_IOADL_FLAGS_READ;
6938 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6939 ioarcb->read_ioadl_len =
6940 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6941 }
6942
ff2aeb1e 6943 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6944 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6945 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6946
6947 last_ioadl = ioadl;
6948 ioadl++;
35a39691 6949 }
6950
6951 if (likely(last_ioadl))
6952 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6953}
6954
56d6aa33 6955/**
6956 * ipr_qc_defer - Get a free ipr_cmd
6957 * @qc: queued command
6958 *
6959 * Return value:
6960 * 0 if the command can be issued / ATA_DEFER_LINK if it must be deferred
6961 **/
6962static int ipr_qc_defer(struct ata_queued_cmd *qc)
6963{
6964 struct ata_port *ap = qc->ap;
6965 struct ipr_sata_port *sata_port = ap->private_data;
6966 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6967 struct ipr_cmnd *ipr_cmd;
6968 struct ipr_hrr_queue *hrrq;
6969 int hrrq_id;
6970
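	/*
	 * ipr_get_hrrq_index() (defined earlier in this file) appears to
	 * rotate across the available HRRQs so SATA ops are spread over
	 * the queues when more than one is configured.
	 */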
6971 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6972 hrrq = &ioa_cfg->hrrq[hrrq_id];
6973
6974 qc->lldd_task = NULL;
6975 spin_lock(&hrrq->_lock);
6976 if (unlikely(hrrq->ioa_is_dead)) {
6977 spin_unlock(&hrrq->_lock);
6978 return 0;
6979 }
6980
6981 if (unlikely(!hrrq->allow_cmds)) {
6982 spin_unlock(&hrrq->_lock);
6983 return ATA_DEFER_LINK;
6984 }
6985
6986 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6987 if (ipr_cmd == NULL) {
6988 spin_unlock(&hrrq->_lock);
6989 return ATA_DEFER_LINK;
6990 }
6991
6992 qc->lldd_task = ipr_cmd;
6993 spin_unlock(&hrrq->_lock);
6994 return 0;
6995}
6996
6997/**
6998 * ipr_qc_issue - Issue a SATA qc to a device
6999 * @qc: queued command
7000 *
7001 * Return value:
7002 * 0 if success / AC_ERR_* mask otherwise
7003 **/
7004static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7005{
7006 struct ata_port *ap = qc->ap;
7007 struct ipr_sata_port *sata_port = ap->private_data;
7008 struct ipr_resource_entry *res = sata_port->res;
7009 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7010 struct ipr_cmnd *ipr_cmd;
7011 struct ipr_ioarcb *ioarcb;
7012 struct ipr_ioarcb_ata_regs *regs;
7013
56d6aa33 7014 if (qc->lldd_task == NULL)
7015 ipr_qc_defer(qc);
7016
7017 ipr_cmd = qc->lldd_task;
7018 if (ipr_cmd == NULL)
0feeed82 7019 return AC_ERR_SYSTEM;
35a39691 7020
56d6aa33 7021 qc->lldd_task = NULL;
7022 spin_lock(&ipr_cmd->hrrq->_lock);
7023 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7024 ipr_cmd->hrrq->ioa_is_dead)) {
7025 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7026 spin_unlock(&ipr_cmd->hrrq->_lock);
7027 return AC_ERR_SYSTEM;
7028 }
7029
05a6538a 7030 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
35a39691 7031 ioarcb = &ipr_cmd->ioarcb;
35a39691 7032
7033 if (ioa_cfg->sis64) {
7034 regs = &ipr_cmd->i.ata_ioadl.regs;
7035 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7036 } else
7037 regs = &ioarcb->u.add_data.u.regs;
7038
7039 memset(regs, 0, sizeof(*regs));
7040 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
35a39691 7041
56d6aa33 7042 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
35a39691
BK
7043 ipr_cmd->qc = qc;
7044 ipr_cmd->done = ipr_sata_done;
3e7ebdfa 7045 ipr_cmd->ioarcb.res_handle = res->res_handle;
35a39691
BK
7046 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7047 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7048 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
dde20207 7049 ipr_cmd->dma_use_sg = qc->n_elem;
35a39691 7050
7051 if (ioa_cfg->sis64)
7052 ipr_build_ata_ioadl64(ipr_cmd, qc);
7053 else
7054 ipr_build_ata_ioadl(ipr_cmd, qc);
7055
7056 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7057 ipr_copy_sata_tf(regs, &qc->tf);
7058 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
3e7ebdfa 7059 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7060
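	/*
	 * Translate the libata protocol into IOA ATA flags: DMA protocols
	 * request a DMA transfer type, ATAPI protocols set the packet
	 * command flag, and anything unrecognized is rejected below.
	 */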
7061 switch (qc->tf.protocol) {
7062 case ATA_PROT_NODATA:
7063 case ATA_PROT_PIO:
7064 break;
7065
7066 case ATA_PROT_DMA:
7067 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7068 break;
7069
7070 case ATAPI_PROT_PIO:
7071 case ATAPI_PROT_NODATA:
7072 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7073 break;
7074
0dc36888 7075 case ATAPI_PROT_DMA:
35a39691
BK
7076 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7077 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7078 break;
7079
7080 default:
7081 WARN_ON(1);
56d6aa33 7082 spin_unlock(&ipr_cmd->hrrq->_lock);
0feeed82 7083 return AC_ERR_INVALID;
7084 }
7085
a32c055f 7086 ipr_send_command(ipr_cmd);
56d6aa33 7087 spin_unlock(&ipr_cmd->hrrq->_lock);
a32c055f 7088
7089 return 0;
7090}
7091
7092/**
7093 * ipr_qc_fill_rtf - Read result TF
7094 * @qc: ATA queued command
7095 *
7096 * Return value:
7097 * true
7098 **/
7099static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7100{
7101 struct ipr_sata_port *sata_port = qc->ap->private_data;
7102 struct ipr_ioasa_gata *g = &sata_port->ioasa;
7103 struct ata_taskfile *tf = &qc->result_tf;
7104
7105 tf->feature = g->error;
7106 tf->nsect = g->nsect;
7107 tf->lbal = g->lbal;
7108 tf->lbam = g->lbam;
7109 tf->lbah = g->lbah;
7110 tf->device = g->device;
7111 tf->command = g->status;
7112 tf->hob_nsect = g->hob_nsect;
7113 tf->hob_lbal = g->hob_lbal;
7114 tf->hob_lbam = g->hob_lbam;
7115 tf->hob_lbah = g->hob_lbah;
7116
7117 return true;
7118}
7119
35a39691 7120static struct ata_port_operations ipr_sata_ops = {
35a39691 7121 .phy_reset = ipr_ata_phy_reset,
a1efdaba 7122 .hardreset = ipr_sata_reset,
35a39691 7123 .post_internal_cmd = ipr_ata_post_internal,
35a39691 7124 .qc_prep = ata_noop_qc_prep,
56d6aa33 7125 .qc_defer = ipr_qc_defer,
35a39691 7126 .qc_issue = ipr_qc_issue,
4c9bf4e7 7127 .qc_fill_rtf = ipr_qc_fill_rtf,
7128 .port_start = ata_sas_port_start,
7129 .port_stop = ata_sas_port_stop
7130};
7131
7132static struct ata_port_info sata_port_info = {
7133 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7134 ATA_FLAG_SAS_HOST,
7135 .pio_mask = ATA_PIO4_ONLY,
7136 .mwdma_mask = ATA_MWDMA2,
7137 .udma_mask = ATA_UDMA6,
7138 .port_ops = &ipr_sata_ops
7139};
7140
7141#ifdef CONFIG_PPC_PSERIES
7142static const u16 ipr_blocked_processors[] = {
7143 PVR_NORTHSTAR,
7144 PVR_PULSAR,
7145 PVR_POWER4,
7146 PVR_ICESTAR,
7147 PVR_SSTAR,
7148 PVR_POWER4p,
7149 PVR_630,
7150 PVR_630p
7151};
7152
7153/**
7154 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7155 * @ioa_cfg: ioa cfg struct
7156 *
7157 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7158 * certain pSeries hardware. This function determines if the given
7159 * adapter is in one of these configurations or not.
7160 *
7161 * Return value:
7162 * 1 if adapter is not supported / 0 if adapter is supported
7163 **/
7164static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7165{
7166 int i;
7167
44c10138 7168 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
203fa3fe 7169 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
d3dbeef6 7170 if (pvr_version_is(ipr_blocked_processors[i]))
44c10138 7171 return 1;
7172 }
7173 }
7174 return 0;
7175}
7176#else
7177#define ipr_invalid_adapter(ioa_cfg) 0
7178#endif
7179
7180/**
7181 * ipr_ioa_bringdown_done - IOA bring down completion.
7182 * @ipr_cmd: ipr command struct
7183 *
7184 * This function processes the completion of an adapter bring down.
7185 * It wakes any reset sleepers.
7186 *
7187 * Return value:
7188 * IPR_RC_JOB_RETURN
7189 **/
7190static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7191{
7192 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96b04db9 7193 int i;
7194
7195 ENTER;
7196 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7197 ipr_trace;
7198 ioa_cfg->scsi_unblock = 1;
7199 schedule_work(&ioa_cfg->work_q);
7200 }
7201
7202 ioa_cfg->in_reset_reload = 0;
7203 ioa_cfg->reset_retries = 0;
96b04db9 7204 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7205 spin_lock(&ioa_cfg->hrrq[i]._lock);
7206 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7207 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7208 }
7209 wmb();
7210
05a6538a 7211 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4 7212 wake_up_all(&ioa_cfg->reset_wait_q);
7213 LEAVE;
7214
7215 return IPR_RC_JOB_RETURN;
7216}
7217
7218/**
7219 * ipr_ioa_reset_done - IOA reset completion.
7220 * @ipr_cmd: ipr command struct
7221 *
7222 * This function processes the completion of an adapter reset.
7223 * It schedules any necessary mid-layer add/removes and
7224 * wakes any reset sleepers.
7225 *
7226 * Return value:
7227 * IPR_RC_JOB_RETURN
7228 **/
7229static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7230{
7231 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7232 struct ipr_resource_entry *res;
afc3f83c 7233 int j;
7234
7235 ENTER;
7236 ioa_cfg->in_reset_reload = 0;
56d6aa33 7237 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7238 spin_lock(&ioa_cfg->hrrq[j]._lock);
7239 ioa_cfg->hrrq[j].allow_cmds = 1;
7240 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7241 }
7242 wmb();
1da177e4 7243 ioa_cfg->reset_cmd = NULL;
3d1d0da6 7244 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7245
7246 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
f688f96d 7247 if (res->add_to_ml || res->del_from_ml) {
7248 ipr_trace;
7249 break;
7250 }
7251 }
7252 schedule_work(&ioa_cfg->work_q);
7253
7254 for (j = 0; j < IPR_NUM_HCAMS; j++) {
7255 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7256 if (j < IPR_NUM_LOG_HCAMS)
7257 ipr_send_hcam(ioa_cfg,
7258 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7259 ioa_cfg->hostrcb[j]);
1da177e4 7260 else
7261 ipr_send_hcam(ioa_cfg,
7262 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7263 ioa_cfg->hostrcb[j]);
7264 }
7265
6bb04170 7266 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7267 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7268
7269 ioa_cfg->reset_retries = 0;
05a6538a 7270 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7271 wake_up_all(&ioa_cfg->reset_wait_q);
7272
b0e17a9b 7273 ioa_cfg->scsi_unblock = 1;
f688f96d 7274 schedule_work(&ioa_cfg->work_q);
7275 LEAVE;
7276 return IPR_RC_JOB_RETURN;
7277}
7278
7279/**
7280 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7281 * @supported_dev: supported device struct
7282 * @vpids: vendor product id struct
7283 *
7284 * Return value:
7285 * none
7286 **/
7287static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7288 struct ipr_std_inq_vpids *vpids)
7289{
7290 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7291 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7292 supported_dev->num_records = 1;
7293 supported_dev->data_length =
7294 cpu_to_be16(sizeof(struct ipr_supported_device));
7295 supported_dev->reserved = 0;
7296}
7297
7298/**
7299 * ipr_set_supported_devs - Send Set Supported Devices for a device
7300 * @ipr_cmd: ipr command struct
7301 *
a32c055f 7302 * This function sends a Set Supported Devices to the adapter
7303 *
7304 * Return value:
7305 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7306 **/
7307static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7308{
7309 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7310 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7311 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7312 struct ipr_resource_entry *res = ipr_cmd->u.res;
7313
7314 ipr_cmd->job_step = ipr_ioa_reset_done;
7315
7316 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 7317 if (!ipr_is_scsi_disk(res))
7318 continue;
7319
7320 ipr_cmd->u.res = res;
3e7ebdfa 7321 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7322
7323 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7324 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7325 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7326
7327 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 7328 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7329 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7330 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7331
7332 ipr_init_ioadl(ipr_cmd,
7333 ioa_cfg->vpd_cbs_dma +
7334 offsetof(struct ipr_misc_cbs, supp_dev),
7335 sizeof(struct ipr_supported_device),
7336 IPR_IOADL_FLAGS_WRITE_LAST);
7337
7338 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7339 IPR_SET_SUP_DEVICE_TIMEOUT);
7340
7341 if (!ioa_cfg->sis64)
7342 ipr_cmd->job_step = ipr_set_supported_devs;
05a6538a 7343 LEAVE;
7344 return IPR_RC_JOB_RETURN;
7345 }
7346
05a6538a 7347 LEAVE;
7348 return IPR_RC_JOB_CONTINUE;
7349}
7350
7351/**
7352 * ipr_get_mode_page - Locate specified mode page
7353 * @mode_pages: mode page buffer
7354 * @page_code: page code to find
7355 * @len: minimum required length for mode page
7356 *
7357 * Return value:
7358 * pointer to mode page / NULL on failure
7359 **/
7360static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7361 u32 page_code, u32 len)
7362{
7363 struct ipr_mode_page_hdr *mode_hdr;
7364 u32 page_length;
7365 u32 length;
7366
7367 if (!mode_pages || (mode_pages->hdr.length == 0))
7368 return NULL;
7369
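	/*
	 * hdr.length does not count itself, so the mode data is
	 * hdr.length + 1 bytes in total; subtracting the 4 byte mode
	 * parameter header and any block descriptors leaves the number
	 * of bytes occupied by the mode pages themselves.
	 */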
7370 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7371 mode_hdr = (struct ipr_mode_page_hdr *)
7372 (mode_pages->data + mode_pages->hdr.block_desc_len);
7373
7374 while (length) {
7375 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7376 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7377 return mode_hdr;
7378 break;
7379 } else {
7380 page_length = (sizeof(struct ipr_mode_page_hdr) +
7381 mode_hdr->page_length);
7382 length -= page_length;
7383 mode_hdr = (struct ipr_mode_page_hdr *)
7384 ((unsigned long)mode_hdr + page_length);
7385 }
7386 }
7387 return NULL;
7388}
7389
7390/**
7391 * ipr_check_term_power - Check for term power errors
7392 * @ioa_cfg: ioa config struct
7393 * @mode_pages: IOAFP mode pages buffer
7394 *
7395 * Check the IOAFP's mode page 28 for term power errors
7396 *
7397 * Return value:
7398 * nothing
7399 **/
7400static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7401 struct ipr_mode_pages *mode_pages)
7402{
7403 int i;
7404 int entry_length;
7405 struct ipr_dev_bus_entry *bus;
7406 struct ipr_mode_page28 *mode_page;
7407
7408 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7409 sizeof(struct ipr_mode_page28));
7410
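	/*
	 * Note: this assumes the IOAFP always returns mode page 28;
	 * ipr_get_mode_page() returns NULL when a page is missing, and
	 * that case is not checked for here.
	 */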
7411 entry_length = mode_page->entry_length;
7412
7413 bus = mode_page->bus;
7414
7415 for (i = 0; i < mode_page->num_entries; i++) {
7416 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7417 dev_err(&ioa_cfg->pdev->dev,
7418 "Term power is absent on scsi bus %d\n",
7419 bus->res_addr.bus);
7420 }
7421
7422 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7423 }
7424}
7425
7426/**
7427 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7428 * @ioa_cfg: ioa config struct
7429 *
7430 * Looks through the config table checking for SES devices. If
7431 * a SES device is found in the SES table and it indicates a maximum
7432 * SCSI bus speed, the bus speed is limited accordingly.
7433 *
7434 * Return value:
7435 * none
7436 **/
7437static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7438{
7439 u32 max_xfer_rate;
7440 int i;
7441
7442 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7443 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7444 ioa_cfg->bus_attr[i].bus_width);
7445
7446 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7447 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7448 }
7449}
7450
7451/**
7452 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7453 * @ioa_cfg: ioa config struct
7454 * @mode_pages: mode page 28 buffer
7455 *
7456 * Updates mode page 28 based on driver configuration
7457 *
7458 * Return value:
7459 * none
7460 **/
7461static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
203fa3fe 7462 struct ipr_mode_pages *mode_pages)
7463{
7464 int i, entry_length;
7465 struct ipr_dev_bus_entry *bus;
7466 struct ipr_bus_attributes *bus_attr;
7467 struct ipr_mode_page28 *mode_page;
7468
7469 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7470 sizeof(struct ipr_mode_page28));
7471
7472 entry_length = mode_page->entry_length;
7473
7474 /* Loop for each device bus entry */
7475 for (i = 0, bus = mode_page->bus;
7476 i < mode_page->num_entries;
7477 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7478 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7479 dev_err(&ioa_cfg->pdev->dev,
7480 "Invalid resource address reported: 0x%08X\n",
7481 IPR_GET_PHYS_LOC(bus->res_addr));
7482 continue;
7483 }
7484
7485 bus_attr = &ioa_cfg->bus_attr[i];
7486 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7487 bus->bus_width = bus_attr->bus_width;
7488 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7489 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7490 if (bus_attr->qas_enabled)
7491 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7492 else
7493 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7494 }
7495}
7496
7497/**
7498 * ipr_build_mode_select - Build a mode select command
7499 * @ipr_cmd: ipr command struct
7500 * @res_handle: resource handle to send command to
7501 * @parm: Byte 2 of Mode Sense command
7502 * @dma_addr: DMA buffer address
7503 * @xfer_len: data transfer length
7504 *
7505 * Return value:
7506 * none
7507 **/
7508static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7509 __be32 res_handle, u8 parm,
7510 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7511{
7512 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7513
7514 ioarcb->res_handle = res_handle;
7515 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7516 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7517 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7518 ioarcb->cmd_pkt.cdb[1] = parm;
7519 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7520
a32c055f 7521 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7522}
7523
7524/**
7525 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7526 * @ipr_cmd: ipr command struct
7527 *
7528 * This function sets up the SCSI bus attributes and sends
7529 * a Mode Select for Page 28 to activate them.
7530 *
7531 * Return value:
7532 * IPR_RC_JOB_RETURN
7533 **/
7534static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7535{
7536 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7537 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7538 int length;
7539
7540 ENTER;
7541 ipr_scsi_bus_speed_limit(ioa_cfg);
7542 ipr_check_term_power(ioa_cfg, mode_pages);
7543 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7544 length = mode_pages->hdr.length + 1;
7545 mode_pages->hdr.length = 0;
7546
7547 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7548 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7549 length);
7550
7551 ipr_cmd->job_step = ipr_set_supported_devs;
7552 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7553 struct ipr_resource_entry, queue);
7554 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7555
7556 LEAVE;
7557 return IPR_RC_JOB_RETURN;
7558}
7559
7560/**
7561 * ipr_build_mode_sense - Builds a mode sense command
7562 * @ipr_cmd: ipr command struct
7563 * @res_handle: resource handle to send command to
7564 * @parm: Byte 2 of mode sense command
7565 * @dma_addr: DMA address of mode sense buffer
7566 * @xfer_len: Size of DMA buffer
7567 *
7568 * Return value:
7569 * none
7570 **/
7571static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7572 __be32 res_handle,
a32c055f 7573 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7574{
7575 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7576
7577 ioarcb->res_handle = res_handle;
7578 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7579 ioarcb->cmd_pkt.cdb[2] = parm;
7580 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7581 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7582
a32c055f 7583 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7584}
7585
dfed823e 7586/**
7587 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7588 * @ipr_cmd: ipr command struct
7589 *
7590 * This function handles the failure of an IOA bringup command.
7591 *
7592 * Return value:
7593 * IPR_RC_JOB_RETURN
7594 **/
7595static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7596{
7597 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7598 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e 7599
7600 dev_err(&ioa_cfg->pdev->dev,
7601 "0x%02X failed with IOASC: 0x%08X\n",
7602 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7603
7604 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
05a6538a 7605 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
dfed823e 7606 return IPR_RC_JOB_RETURN;
7607}
7608
7609/**
7610 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7611 * @ipr_cmd: ipr command struct
7612 *
7613 * This function handles the failure of a Mode Sense to the IOAFP.
7614 * Some adapters do not handle all mode pages.
7615 *
7616 * Return value:
7617 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7618 **/
7619static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7620{
f72919ec 7621 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7622 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e 7623
7624 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7625 ipr_cmd->job_step = ipr_set_supported_devs;
7626 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7627 struct ipr_resource_entry, queue);
dfed823e 7628 return IPR_RC_JOB_CONTINUE;
7629 }
7630
7631 return ipr_reset_cmd_failed(ipr_cmd);
7632}
7633
7634/**
7635 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7636 * @ipr_cmd: ipr command struct
7637 *
7638 * This function sends a Page 28 mode sense to the IOA to
7639 * retrieve SCSI bus attributes.
7640 *
7641 * Return value:
7642 * IPR_RC_JOB_RETURN
7643 **/
7644static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7645{
7646 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7647
7648 ENTER;
7649 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7650 0x28, ioa_cfg->vpd_cbs_dma +
7651 offsetof(struct ipr_misc_cbs, mode_pages),
7652 sizeof(struct ipr_mode_pages));
7653
7654 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 7655 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7656
7657 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7658
7659 LEAVE;
7660 return IPR_RC_JOB_RETURN;
7661}
7662
7663/**
7664 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7665 * @ipr_cmd: ipr command struct
7666 *
7667 * This function enables dual IOA RAID support if possible.
7668 *
7669 * Return value:
7670 * IPR_RC_JOB_RETURN
7671 **/
7672static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7673{
7674 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7675 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7676 struct ipr_mode_page24 *mode_page;
7677 int length;
7678
7679 ENTER;
7680 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7681 sizeof(struct ipr_mode_page24));
7682
7683 if (mode_page)
7684 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7685
7686 length = mode_pages->hdr.length + 1;
7687 mode_pages->hdr.length = 0;
7688
7689 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7690 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7691 length);
7692
7693 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7694 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7695
7696 LEAVE;
7697 return IPR_RC_JOB_RETURN;
7698}
7699
7700/**
7701 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7702 * @ipr_cmd: ipr command struct
7703 *
7704 * This function handles the failure of a Mode Sense to the IOAFP.
7705 * Some adapters do not handle all mode pages.
7706 *
7707 * Return value:
7708 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7709 **/
7710static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7711{
96d21f00 7712 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7713
7714 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7715 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7716 return IPR_RC_JOB_CONTINUE;
7717 }
7718
7719 return ipr_reset_cmd_failed(ipr_cmd);
7720}
7721
7722/**
7723 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7724 * @ipr_cmd: ipr command struct
7725 *
7726 * This function sends a mode sense to the IOA to retrieve
7727 * the IOA Advanced Function Control mode page.
7728 *
7729 * Return value:
7730 * IPR_RC_JOB_RETURN
7731 **/
7732static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7733{
7734 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7735
7736 ENTER;
7737 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7738 0x24, ioa_cfg->vpd_cbs_dma +
7739 offsetof(struct ipr_misc_cbs, mode_pages),
7740 sizeof(struct ipr_mode_pages));
7741
7742 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7743 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7744
7745 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7746
7747 LEAVE;
7748 return IPR_RC_JOB_RETURN;
7749}
7750
7751/**
7752 * ipr_init_res_table - Initialize the resource table
7753 * @ipr_cmd: ipr command struct
7754 *
7755 * This function looks through the existing resource table, comparing
7756 * it with the config table. This function will take care of old/new
7757 * devices and schedule adding/removing them from the mid-layer
7758 * as appropriate.
7759 *
7760 * Return value:
7761 * IPR_RC_JOB_CONTINUE
7762 **/
7763static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7764{
7765 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7766 struct ipr_resource_entry *res, *temp;
7767 struct ipr_config_table_entry_wrapper cfgtew;
7768 int entries, found, flag, i;
7769 LIST_HEAD(old_res);
7770
7771 ENTER;
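	/*
	 * Reconcile in three passes: park every known resource on old_res,
	 * walk the fetched config table moving matches back to used_res_q
	 * (allocating entries flagged add_to_ml for new devices), then flag
	 * leftovers that still have an sdev for del_from_ml and return the
	 * rest to free_res_q.
	 */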
7772 if (ioa_cfg->sis64)
7773 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7774 else
7775 flag = ioa_cfg->u.cfg_table->hdr.flags;
7776
7777 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7778 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7779
7780 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7781 list_move_tail(&res->queue, &old_res);
7782
3e7ebdfa 7783 if (ioa_cfg->sis64)
438b0331 7784 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7785 else
7786 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7787
7788 for (i = 0; i < entries; i++) {
7789 if (ioa_cfg->sis64)
7790 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7791 else
7792 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7793 found = 0;
7794
7795 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 7796 if (ipr_is_same_device(res, &cfgtew)) {
7797 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7798 found = 1;
7799 break;
7800 }
7801 }
7802
7803 if (!found) {
7804 if (list_empty(&ioa_cfg->free_res_q)) {
7805 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7806 break;
7807 }
7808
7809 found = 1;
7810 res = list_entry(ioa_cfg->free_res_q.next,
7811 struct ipr_resource_entry, queue);
7812 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 7813 ipr_init_res_entry(res, &cfgtew);
1da177e4 7814 res->add_to_ml = 1;
7815 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7816 res->sdev->allow_restart = 1;
7817
7818 if (found)
3e7ebdfa 7819 ipr_update_res_entry(res, &cfgtew);
7820 }
7821
7822 list_for_each_entry_safe(res, temp, &old_res, queue) {
7823 if (res->sdev) {
7824 res->del_from_ml = 1;
3e7ebdfa 7825 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 7826 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7827 }
7828 }
7829
7830 list_for_each_entry_safe(res, temp, &old_res, queue) {
7831 ipr_clear_res_target(res);
7832 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7833 }
7834
7835 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7836 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7837 else
7838 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7839
7840 LEAVE;
7841 return IPR_RC_JOB_CONTINUE;
7842}
7843
7844/**
7845 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7846 * @ipr_cmd: ipr command struct
7847 *
7848 * This function sends a Query IOA Configuration command
7849 * to the adapter to retrieve the IOA configuration table.
7850 *
7851 * Return value:
7852 * IPR_RC_JOB_RETURN
7853 **/
7854static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7855{
7856 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7857 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4 7858 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 7859 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7860
7861 ENTER;
7862 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7863 ioa_cfg->dual_raid = 1;
7864 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7865 ucode_vpd->major_release, ucode_vpd->card_type,
7866 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7867 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7868 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7869
7870 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
438b0331 7871 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7872 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7873 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
1da177e4 7874
3e7ebdfa 7875 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
a32c055f 7876 IPR_IOADL_FLAGS_READ_LAST);
7877
7878 ipr_cmd->job_step = ipr_init_res_table;
7879
7880 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7881
7882 LEAVE;
7883 return IPR_RC_JOB_RETURN;
7884}
7885
7886static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7887{
7888 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7889
7890 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7891 return IPR_RC_JOB_CONTINUE;
7892
7893 return ipr_reset_cmd_failed(ipr_cmd);
7894}
7895
7896static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7897 __be32 res_handle, u8 sa_code)
7898{
7899 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7900
7901 ioarcb->res_handle = res_handle;
7902 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7903 ioarcb->cmd_pkt.cdb[1] = sa_code;
7904 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7905}
7906
7907/**
7908 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7909 * action
7910 * @ipr_cmd: ipr command struct
7911 * Return value:
7912 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7913 **/
7914static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7915{
7916 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7917 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7918 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7919
7920 ENTER;
7921
7922 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7923
7924 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7925 ipr_build_ioa_service_action(ipr_cmd,
7926 cpu_to_be32(IPR_IOA_RES_HANDLE),
7927 IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7928
7929 ioarcb->cmd_pkt.cdb[2] = 0x40;
7930
7931 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7932 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7933 IPR_SET_SUP_DEVICE_TIMEOUT);
7934
7935 LEAVE;
7936 return IPR_RC_JOB_RETURN;
7937 }
7938
7939 LEAVE;
7940 return IPR_RC_JOB_CONTINUE;
7941}
7942
7943/**
7944 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7945 * @ipr_cmd: ipr command struct
7946 *
7947 * This utility function sends an inquiry to the adapter.
7948 *
7949 * Return value:
7950 * none
7951 **/
7952static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
a32c055f 7953 dma_addr_t dma_addr, u8 xfer_len)
7954{
7955 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7956
7957 ENTER;
7958 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7959 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7960
7961 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7962 ioarcb->cmd_pkt.cdb[1] = flags;
7963 ioarcb->cmd_pkt.cdb[2] = page;
7964 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7965
a32c055f 7966 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7967
7968 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7969 LEAVE;
7970}
7971
62275040 7972/**
7973 * ipr_inquiry_page_supported - Is the given inquiry page supported
7974 * @page0: inquiry page 0 buffer
7975 * @page: page code.
7976 *
7977 * This function determines if the specified inquiry page is supported.
7978 *
7979 * Return value:
7980 * 1 if page is supported / 0 if not
7981 **/
7982static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7983{
7984 int i;
7985
7986 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7987 if (page0->page[i] == page)
7988 return 1;
7989
7990 return 0;
7991}
7992
7993/**
7994 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7995 * @ipr_cmd: ipr command struct
7996 *
7997 * This function sends a Page 0xC4 inquiry to the adapter
7998 * to retrieve software VPD information.
7999 *
8000 * Return value:
8001 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8002 **/
8003static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8004{
8005 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8006 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8007 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8008
8009 ENTER;
1a47af26 8010 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8011 memset(pageC4, 0, sizeof(*pageC4));
8012
8013 if (ipr_inquiry_page_supported(page0, 0xC4)) {
8014 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8015 (ioa_cfg->vpd_cbs_dma
8016 + offsetof(struct ipr_misc_cbs,
8017 pageC4_data)),
8018 sizeof(struct ipr_inquiry_pageC4));
8019 return IPR_RC_JOB_RETURN;
8020 }
8021
8022 LEAVE;
8023 return IPR_RC_JOB_CONTINUE;
8024}
8025
8026/**
8027 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8028 * @ipr_cmd: ipr command struct
8029 *
8030 * This function sends a Page 0xD0 inquiry to the adapter
8031 * to retrieve adapter capabilities.
8032 *
8033 * Return value:
8034 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8035 **/
8036static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8037{
8038 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8039 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8040 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8041
8042 ENTER;
1021b3ff 8043 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8044 memset(cap, 0, sizeof(*cap));
8045
8046 if (ipr_inquiry_page_supported(page0, 0xD0)) {
8047 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8048 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8049 sizeof(struct ipr_inquiry_cap));
8050 return IPR_RC_JOB_RETURN;
8051 }
8052
8053 LEAVE;
8054 return IPR_RC_JOB_CONTINUE;
8055}
8056
8057/**
8058 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8059 * @ipr_cmd: ipr command struct
8060 *
8061 * This function sends a Page 3 inquiry to the adapter
8062 * to retrieve software VPD information.
8063 *
8064 * Return value:
8065 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8066 **/
8067static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040 8068{
8069 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
62275040 8070
8071 ENTER;
8072
ac09c349 8073 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
62275040 8074
8075 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8076 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8077 sizeof(struct ipr_inquiry_page3));
8078
8079 LEAVE;
8080 return IPR_RC_JOB_RETURN;
8081}
8082
8083/**
8084 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8085 * @ipr_cmd: ipr command struct
8086 *
8087 * This function sends a Page 0 inquiry to the adapter
8088 * to retrieve supported inquiry pages.
8089 *
8090 * Return value:
8091 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8092 **/
8093static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8094{
8095 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8096 char type[5];
8097
8098 ENTER;
8099
8100 /* Grab the type out of the VPD and store it away */
8101 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8102 type[4] = '\0';
8103 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
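	/* e.g. a product ID beginning "5702" yields ioa_cfg->type == 0x5702 */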
8104
8105 if (ipr_invalid_adapter(ioa_cfg)) {
8106 dev_err(&ioa_cfg->pdev->dev,
8107 "Adapter not supported in this hardware configuration.\n");
8108
8109 if (!ipr_testmode) {
8110 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8111 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8112 list_add_tail(&ipr_cmd->queue,
8113 &ioa_cfg->hrrq->hrrq_free_q);
8114 return IPR_RC_JOB_RETURN;
8115 }
8116 }
8117
62275040 8118 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 8119
62275040 8120 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8121 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8122 sizeof(struct ipr_inquiry_page0));
8123
8124 LEAVE;
8125 return IPR_RC_JOB_RETURN;
8126}
8127
8128/**
8129 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8130 * @ipr_cmd: ipr command struct
8131 *
8132 * This function sends a standard inquiry to the adapter.
8133 *
8134 * Return value:
8135 * IPR_RC_JOB_RETURN
8136 **/
8137static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8138{
8139 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8140
8141 ENTER;
62275040 8142 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8143
8144 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8145 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8146 sizeof(struct ipr_ioa_vpd));
8147
8148 LEAVE;
8149 return IPR_RC_JOB_RETURN;
8150}
8151
8152/**
214777ba 8153 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8154 * @ipr_cmd: ipr command struct
8155 *
8156 * This function sends an Identify Host Request Response Queue
8157 * command to establish the HRRQ with the adapter.
8158 *
8159 * Return value:
8160 * IPR_RC_JOB_RETURN
8161 **/
214777ba 8162static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8163{
8164 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8165 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
05a6538a 8166 struct ipr_hrr_queue *hrrq;
8167
8168 ENTER;
05a6538a 8169 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8170 if (ioa_cfg->identify_hrrq_index == 0)
8171 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
1da177e4 8172
56d6aa33 8173 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8174 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
1da177e4 8175
05a6538a 8176 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8177 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1da177e4 8178
05a6538a 8179 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8180 if (ioa_cfg->sis64)
8181 ioarcb->cmd_pkt.cdb[1] = 0x1;
214777ba 8182
05a6538a 8183 if (ioa_cfg->nvectors == 1)
8184 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8185 else
8186 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8187
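		/*
		 * The HRRQ DMA address is scattered through the CDB, most
		 * significant byte first: bytes 2-5 carry address bits 31:0
		 * and, on SIS-64 adapters, bytes 10-13 carry bits 63:32.
		 * Bytes 7-8 hold the queue length in bytes
		 * (sizeof(u32) * hrrq->size).
		 */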
8188 ioarcb->cmd_pkt.cdb[2] =
8189 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8190 ioarcb->cmd_pkt.cdb[3] =
8191 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8192 ioarcb->cmd_pkt.cdb[4] =
8193 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8194 ioarcb->cmd_pkt.cdb[5] =
8195 ((u64) hrrq->host_rrq_dma) & 0xff;
8196 ioarcb->cmd_pkt.cdb[7] =
8197 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8198 ioarcb->cmd_pkt.cdb[8] =
8199 (sizeof(u32) * hrrq->size) & 0xff;
8200
8201 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 8202 ioarcb->cmd_pkt.cdb[9] =
8203 ioa_cfg->identify_hrrq_index;
1da177e4 8204
05a6538a 8205 if (ioa_cfg->sis64) {
8206 ioarcb->cmd_pkt.cdb[10] =
8207 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8208 ioarcb->cmd_pkt.cdb[11] =
8209 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8210 ioarcb->cmd_pkt.cdb[12] =
8211 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8212 ioarcb->cmd_pkt.cdb[13] =
8213 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8214 }
8215
8216 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 8217 ioarcb->cmd_pkt.cdb[14] =
8218 ioa_cfg->identify_hrrq_index;
05a6538a 8219
8220 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8221 IPR_INTERNAL_TIMEOUT);
8222
56d6aa33 8223 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8224 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
05a6538a 8225
8226 LEAVE;
8227 return IPR_RC_JOB_RETURN;
05a6538a 8228 }
8229
1da177e4 8230 LEAVE;
05a6538a 8231 return IPR_RC_JOB_CONTINUE;
8232}
8233
8234/**
8235 * ipr_reset_timer_done - Adapter reset timer function
8236 * @ipr_cmd: ipr command struct
8237 *
8238 * Description: This function is used in adapter reset processing
8239 * for timing events. If the reset_cmd pointer in the IOA
8240 * config struct is not this adapter's we are doing nested
8241 * resets and fail_all_ops will take care of freeing the
8242 * command block.
8243 *
8244 * Return value:
8245 * none
8246 **/
738c6ec5 8247static void ipr_reset_timer_done(struct timer_list *t)
1da177e4 8248{
738c6ec5 8249 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
8250 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8251 unsigned long lock_flags = 0;
8252
8253 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8254
8255 if (ioa_cfg->reset_cmd == ipr_cmd) {
8256 list_del(&ipr_cmd->queue);
8257 ipr_cmd->done(ipr_cmd);
8258 }
8259
8260 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8261}
8262
8263/**
8264 * ipr_reset_start_timer - Start a timer for adapter reset job
8265 * @ipr_cmd: ipr command struct
8266 * @timeout: timeout value
8267 *
8268 * Description: This function is used in adapter reset processing
8269 * for timing events. If the reset_cmd pointer in the IOA
8270 * config struct is not this adapter's we are doing nested
8271 * resets and fail_all_ops will take care of freeing the
8272 * command block.
8273 *
8274 * Return value:
8275 * none
8276 **/
8277static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8278 unsigned long timeout)
8279{
05a6538a 8280
8281 ENTER;
8282 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8283 ipr_cmd->done = ipr_reset_ioa_job;
8284
1da177e4 8285 ipr_cmd->timer.expires = jiffies + timeout;
841b86f3 8286 ipr_cmd->timer.function = ipr_reset_timer_done;
8287 add_timer(&ipr_cmd->timer);
8288}
8289
8290/**
8291 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8292 * @ioa_cfg: ioa cfg struct
8293 *
8294 * Return value:
8295 * nothing
8296 **/
8297static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8298{
05a6538a 8299 struct ipr_hrr_queue *hrrq;
1da177e4 8300
05a6538a 8301 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 8302 spin_lock(&hrrq->_lock);
05a6538a 8303 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8304
8305 /* Initialize Host RRQ pointers */
8306 hrrq->hrrq_start = hrrq->host_rrq;
8307 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8308 hrrq->hrrq_curr = hrrq->hrrq_start;
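		/*
		 * The toggle bit distinguishes new responses from stale
		 * ones: the adapter writes it into each HRRQ entry and the
		 * driver flips its expected value each time the queue wraps.
		 */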
8309 hrrq->toggle_bit = 1;
56d6aa33 8310 spin_unlock(&hrrq->_lock);
05a6538a 8311 }
56d6aa33 8312 wmb();
05a6538a 8313
56d6aa33 8314 ioa_cfg->identify_hrrq_index = 0;
8315 if (ioa_cfg->hrrq_num == 1)
8316 atomic_set(&ioa_cfg->hrrq_index, 0);
8317 else
8318 atomic_set(&ioa_cfg->hrrq_index, 1);
8319
8320 /* Zero out config table */
3e7ebdfa 8321 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8322}
8323
8324/**
8325 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8326 * @ipr_cmd: ipr command struct
8327 *
8328 * Return value:
8329 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8330 **/
8331static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8332{
8333 unsigned long stage, stage_time;
8334 u32 feedback;
8335 volatile u32 int_reg;
8336 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8337 u64 maskval = 0;
8338
8339 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8340 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8341 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8342
8343 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8344
8345 /* sanity check the stage_time value */
8346 if (stage_time == 0)
8347 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8348 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8349 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8350 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8351 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8352
8353 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8354 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8355 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8356 stage_time = ioa_cfg->transop_timeout;
8357 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8358 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8359 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8360 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8361 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8362 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8363 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8364 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8365 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8366 return IPR_RC_JOB_CONTINUE;
8367 }
8368 }
8369
214777ba 8370 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
841b86f3 8371 ipr_cmd->timer.function = ipr_oper_timeout;
8372 ipr_cmd->done = ipr_reset_ioa_job;
8373 add_timer(&ipr_cmd->timer);
05a6538a 8374
8375 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8376
8377 return IPR_RC_JOB_RETURN;
8378}
8379
8380/**
8381 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8382 * @ipr_cmd: ipr command struct
8383 *
8384 * This function reinitializes some control blocks and
8385 * enables destructive diagnostics on the adapter.
8386 *
8387 * Return value:
8388 * IPR_RC_JOB_RETURN
8389 **/
8390static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8391{
8392 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8393 volatile u32 int_reg;
7be96900 8394 volatile u64 maskval;
56d6aa33 8395 int i;
1da177e4
LT
8396
8397 ENTER;
214777ba 8398 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
1da177e4
LT
8399 ipr_init_ioa_mem(ioa_cfg);
8400
56d6aa33 8401 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8402 spin_lock(&ioa_cfg->hrrq[i]._lock);
8403 ioa_cfg->hrrq[i].allow_interrupts = 1;
8404 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8405 }
8406 wmb();
8701f185
WB
8407 if (ioa_cfg->sis64) {
8408 /* Set the adapter to the correct endian mode. */
8409 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8410 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8411 }
8412
7be96900 8413 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
1da177e4
LT
8414
8415 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8416 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
214777ba 8417 ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4
LT
8418 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8419 return IPR_RC_JOB_CONTINUE;
8420 }
8421
8422 /* Enable destructive diagnostics on IOA */
214777ba
WB
8423 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8424
7be96900
WB
8425 if (ioa_cfg->sis64) {
8426 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8427 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8428 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8429 } else
8430 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4 8431
1da177e4
LT
8432 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8433
8434 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8435
214777ba
WB
8436 if (ioa_cfg->sis64) {
8437 ipr_cmd->job_step = ipr_reset_next_stage;
8438 return IPR_RC_JOB_CONTINUE;
8439 }
8440
5469cb5b 8441 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
841b86f3 8442 ipr_cmd->timer.function = ipr_oper_timeout;
1da177e4
LT
8443 ipr_cmd->done = ipr_reset_ioa_job;
8444 add_timer(&ipr_cmd->timer);
05a6538a 8445 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
8446
8447 LEAVE;
8448 return IPR_RC_JOB_RETURN;
8449}
8450
8451/**
8452 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8453 * @ipr_cmd: ipr command struct
8454 *
8455 * This function is invoked when an adapter dump has run out
8456 * of processing time.
8457 *
8458 * Return value:
8459 * IPR_RC_JOB_CONTINUE
8460 **/
8461static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8462{
8463 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8464
8465 if (ioa_cfg->sdt_state == GET_DUMP)
41e9a696
BK
8466 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8467 else if (ioa_cfg->sdt_state == READ_DUMP)
1da177e4
LT
8468 ioa_cfg->sdt_state = ABORT_DUMP;
8469
4c647e90 8470 ioa_cfg->dump_timeout = 1;
1da177e4
LT
8471 ipr_cmd->job_step = ipr_reset_alert;
8472
8473 return IPR_RC_JOB_CONTINUE;
8474}
8475
8476/**
8477 * ipr_unit_check_no_data - Log a unit check/no data error log
8478 * @ioa_cfg: ioa config struct
8479 *
8480 * Logs an error indicating the adapter unit checked, but for some
8481 * reason, we were unable to fetch the unit check buffer.
8482 *
8483 * Return value:
8484 * nothing
8485 **/
8486static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8487{
8488 ioa_cfg->errors_logged++;
8489 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8490}
8491
8492/**
8493 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8494 * @ioa_cfg: ioa config struct
8495 *
8496 * Fetches the unit check buffer from the adapter by clocking the data
8497 * through the mailbox register.
8498 *
8499 * Return value:
8500 * nothing
8501 **/
8502static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8503{
8504 unsigned long mailbox;
8505 struct ipr_hostrcb *hostrcb;
8506 struct ipr_uc_sdt sdt;
8507 int rc, length;
65f56475 8508 u32 ioasc;
1da177e4
LT
8509
8510 mailbox = readl(ioa_cfg->ioa_mailbox);
8511
dcbad00e 8512 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
1da177e4
LT
8513 ipr_unit_check_no_data(ioa_cfg);
8514 return;
8515 }
8516
8517 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8518 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8519 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8520
dcbad00e
WB
8521 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8522 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8523 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
1da177e4
LT
8524 ipr_unit_check_no_data(ioa_cfg);
8525 return;
8526 }
8527
8528 /* Find length of the first sdt entry (UC buffer) */
dcbad00e
WB
8529 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8530 length = be32_to_cpu(sdt.entry[0].end_token);
8531 else
8532 length = (be32_to_cpu(sdt.entry[0].end_token) -
8533 be32_to_cpu(sdt.entry[0].start_token)) &
8534 IPR_FMT2_MBX_ADDR_MASK;
1da177e4
LT
8535
8536 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8537 struct ipr_hostrcb, queue);
afc3f83c 8538 list_del_init(&hostrcb->queue);
1da177e4
LT
8539 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8540
8541 rc = ipr_get_ldump_data_section(ioa_cfg,
dcbad00e 8542 be32_to_cpu(sdt.entry[0].start_token),
1da177e4
LT
8543 (__be32 *)&hostrcb->hcam,
8544 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8545
65f56475 8546 if (!rc) {
1da177e4 8547 ipr_handle_log_data(ioa_cfg, hostrcb);
4565e370 8548 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
65f56475
BK
8549 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8550 ioa_cfg->sdt_state == GET_DUMP)
8551 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8552 } else
1da177e4
LT
8553 ipr_unit_check_no_data(ioa_cfg);
8554
8555 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8556}
8557
110def85
WB
8558/**
8559 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8560 * @ipr_cmd: ipr command struct
8561 *
8562 * Description: This function will call to get the unit check buffer.
8563 *
8564 * Return value:
8565 * IPR_RC_JOB_RETURN
8566 **/
8567static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8568{
8569 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8570
8571 ENTER;
8572 ioa_cfg->ioa_unit_checked = 0;
8573 ipr_get_unit_check_buffer(ioa_cfg);
8574 ipr_cmd->job_step = ipr_reset_alert;
8575 ipr_reset_start_timer(ipr_cmd, 0);
8576
8577 LEAVE;
8578 return IPR_RC_JOB_RETURN;
8579}
8580
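/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to stabilize
 * @ipr_cmd:	ipr command struct
 *
 * Description: On SIS64 adapters, poll until the mailbox register reports
 * stable (or the wait time is exhausted), then kick off the dump.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/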
static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;

	if (ioa_cfg->sdt_state != GET_DUMP)
		return IPR_RC_JOB_RETURN;

	if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
	    (readl(ioa_cfg->regs.sense_interrupt_reg) &
	     IPR_PCII_MAILBOX_STABLE)) {

		if (!ipr_cmd->u.time_left)
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting for Mailbox register.\n");

		ioa_cfg->sdt_state = READ_DUMP;
		ioa_cfg->dump_timeout = 0;
		if (ioa_cfg->sis64)
			ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
		else
			ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
		ipr_cmd->job_step = ipr_reset_wait_for_dump;
		schedule_work(&ioa_cfg->work_q);

	} else {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd,
				      IPR_CHECK_FOR_RESET_TIMEOUT);
	}

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 int_reg;

	ENTER;
	ioa_cfg->pdev->state_saved = true;
	pci_restore_state(ioa_cfg->pdev);

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	if (ioa_cfg->ioa_unit_checked) {
		if (ioa_cfg->sis64) {
			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
			return IPR_RC_JOB_RETURN;
		} else {
			ioa_cfg->ioa_unit_checked = 0;
			ipr_get_unit_check_buffer(ioa_cfg);
			ipr_cmd->job_step = ipr_reset_alert;
			ipr_reset_start_timer(ipr_cmd, 0);
			return IPR_RC_JOB_RETURN;
		}
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else if (ioa_cfg->sdt_state == GET_DUMP) {
		ipr_cmd->job_step = ipr_dump_mailbox_wait;
		ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_bist_done - BIST has completed on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: Unblock config space and resume the reset process.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	if (ioa_cfg->cfg_locked)
		pci_cfg_access_unlock(ioa_cfg->pdev);
	ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = PCIBIOS_SUCCESSFUL;

	ENTER;
	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
		writel(IPR_UPROCI_SIS64_START_BIST,
		       ioa_cfg->regs.set_uproc_interrupt_reg32);
	else
		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc == PCIBIOS_SUCCESSFUL) {
		ipr_cmd->job_step = ipr_reset_bist_done;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	} else {
		if (ioa_cfg->cfg_locked)
			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
		ioa_cfg->cfg_locked = 0;
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	}

	LEAVE;
	return rc;
}

/**
 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This clears PCI reset to the adapter and delays two seconds.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
	ENTER;
	ipr_cmd->job_step = ipr_reset_bist_done;
	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
 * @work:	work struct
 *
 * Description: This pulses warm reset to a slot.
 *
 **/
static void ipr_reset_reset_work(struct work_struct *work)
{
	struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct pci_dev *pdev = ioa_cfg->pdev;
	unsigned long lock_flags = 0;

	ENTER;
	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
	msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);

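	/* Only resume the reset job if this command is still the active reset */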
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->reset_cmd == ipr_cmd)
		ipr_reset_ioa_job(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This asserts PCI reset to the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
	queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
	ipr_cmd->job_step = ipr_reset_slot_reset_done;
	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_block_config_access_wait - Wait for permission to block config access
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;

	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
		ioa_cfg->cfg_locked = 1;
		ipr_cmd->job_step = ioa_cfg->reset;
	} else {
		if (ipr_cmd->u.time_left) {
			rc = IPR_RC_JOB_RETURN;
			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
			ipr_reset_start_timer(ipr_cmd,
					      IPR_CHECK_FOR_RESET_TIMEOUT);
		} else {
			ipr_cmd->job_step = ioa_cfg->reset;
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting to lock config access. Resetting anyway.\n");
		}
	}

	return rc;
}

/**
 * ipr_reset_block_config_access - Block config access to the IOA
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}

/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}

/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_quiesce_done - Complete IOA disconnect
 * @ipr_cmd:	ipr command struct
 *
 * Description: Freeze the adapter to complete quiesce processing
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioa_bringdown_done;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_cancel_hcam_done - Check for outstanding commands
 * @ipr_cmd:	ipr command struct
 *
 * Description: Ensure nothing is outstanding to the IOA and
 *		proceed with IOA disconnect. Otherwise reset the IOA.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmnd *loop_cmd;
	struct ipr_hrr_queue *hrrq;
	int rc = IPR_RC_JOB_CONTINUE;
	int count = 0;

	ENTER;
	ipr_cmd->job_step = ipr_reset_quiesce_done;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
			count++;
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
			rc = IPR_RC_JOB_RETURN;
			break;
		}
		spin_unlock(&hrrq->_lock);

		if (count)
			break;
	}

	LEAVE;
	return rc;
}

/**
 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
 * @ipr_cmd:	ipr command struct
 *
 * Description: Cancel any outstanding HCAMs to the IOA.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_cmnd *hcam_cmd;
	struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];

	ENTER;
	ipr_cmd->job_step = ipr_reset_cancel_hcam_done;

	if (!hrrq->ioa_is_dead) {
		if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
			list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
				if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
					continue;

				ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
				ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
				cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
				cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
				cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
				cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
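				/*
				 * The 64-bit IOARCB address of the HCAM being
				 * cancelled is split across the CDB: bytes
				 * 10-13 carry the upper 32 bits, bytes 2-5
				 * the lower 32 bits.
				 */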
				cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
				cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
				cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
				cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
				cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
				cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
				cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
				cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;

				ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
					   IPR_CANCEL_TIMEOUT);

				rc = IPR_RC_JOB_RETURN;
				ipr_cmd->job_step = ipr_reset_cancel_hcam;
				break;
			}
		}
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}

/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ENTER;
	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
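	/* Bytes 6-8 of the WRITE BUFFER CDB hold the 24-bit transfer length */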
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	if (ioa_cfg->sis64)
		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	else
		ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
		ipr_cmd->job_step = ipr_reset_cancel_hcam;
	else if (shutdown_type != IPR_SHUTDOWN_NONE &&
			!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		else
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}

/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue,
					&ipr_cmd->hrrq->hrrq_free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}

/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;
	int i;

	ioa_cfg->in_reset_reload = 1;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_cmds = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
		ioa_cfg->scsi_unblock = 0;
		ioa_cfg->scsi_blocked = 1;
		scsi_block_requests(ioa_cfg->host);
	}

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}

/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	int i;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload) {
		if (ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
		else if (ioa_cfg->sdt_state == READ_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
	}

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
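		/* Mark every HRRQ dead so no new commands are accepted */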
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 1;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		wmb();

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
				ioa_cfg->scsi_unblock = 1;
				schedule_work(&ioa_cfg->work_q);
			}
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}

/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 */
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

	/* Disallow new interrupts, avoid loop */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the MMIO
 * access to the IOA has been restored
 */
static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (!ioa_cfg->probe_done)
		pci_save_state(pdev);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 */
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done)
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->needs_warm_reset)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		else
			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
						IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 */
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
		ioa_cfg->in_ioa_bringdown = 1;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].allow_cmds = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		wmb();
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 * 	PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET
 * 	or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	ioa_cfg->probe_done = 1;
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if (ioa_cfg->ipr_cmnd_list) {
		for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
			if (ioa_cfg->ipr_cmnd_list[i])
				dma_pool_free(ioa_cfg->ipr_cmd_pool,
					      ioa_cfg->ipr_cmnd_list[i],
					      ioa_cfg->ipr_cmnd_list_dma[i]);

			ioa_cfg->ipr_cmnd_list[i] = NULL;
		}
	}

	if (ioa_cfg->ipr_cmd_pool)
		dma_pool_destroy(ioa_cfg->ipr_cmd_pool);

	kfree(ioa_cfg->ipr_cmnd_list);
	kfree(ioa_cfg->ipr_cmnd_list_dma);
	ioa_cfg->ipr_cmnd_list = NULL;
	ioa_cfg->ipr_cmnd_list_dma = NULL;
	ioa_cfg->ipr_cmd_pool = NULL;
}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++)
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(u32) * ioa_cfg->hrrq[i].size,
				  ioa_cfg->hrrq[i].host_rrq,
				  ioa_cfg->hrrq[i].host_rrq_dma);

	dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_MAX_HCAMS; i++) {
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(struct ipr_hostrcb),
				  ioa_cfg->hostrcb[i],
				  ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_irqs - Free all allocated IRQs for the adapter.
 * @ioa_cfg:	ipr cfg struct
 *
 * This function frees all allocated IRQs for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i;

	for (i = 0; i < ioa_cfg->nvectors; i++)
		free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
	pci_free_irq_vectors(pdev);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	ipr_free_irqs(ioa_cfg);
	if (ioa_cfg->reset_work_q)
		destroy_workqueue(ioa_cfg->reset_work_q);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -ENOMEM on allocation failure
 **/
static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i, entries_each_hrrq, hrrq_id = 0;

	ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
						sizeof(struct ipr_cmnd), 512, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);

	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
		ipr_free_cmd_blks(ioa_cfg);
		return -ENOMEM;
	}

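	/*
	 * Partition the command blocks among the HRRQs. With multiple
	 * queues, queue 0 gets only the internal command blocks and the
	 * remainder are split evenly across the other queues.
	 */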
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		if (ioa_cfg->hrrq_num > 1) {
			if (i == 0) {
				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
				ioa_cfg->hrrq[i].min_cmd_id = 0;
				ioa_cfg->hrrq[i].max_cmd_id =
					(entries_each_hrrq - 1);
			} else {
				entries_each_hrrq =
					IPR_NUM_BASE_CMD_BLKS /
					(ioa_cfg->hrrq_num - 1);
				ioa_cfg->hrrq[i].min_cmd_id =
					IPR_NUM_INTERNAL_CMD_BLKS +
					(i - 1) * entries_each_hrrq;
				ioa_cfg->hrrq[i].max_cmd_id =
					(IPR_NUM_INTERNAL_CMD_BLKS +
					i * entries_each_hrrq - 1);
			}
		} else {
			entries_each_hrrq = IPR_NUM_CMD_BLKS;
			ioa_cfg->hrrq[i].min_cmd_id = 0;
			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
		}
		ioa_cfg->hrrq[i].size = entries_each_hrrq;
	}

	BUG_ON(ioa_cfg->hrrq_num == 0);

9649
9650 i = IPR_NUM_CMD_BLKS -
9651 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9652 if (i > 0) {
9653 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9654 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9655 }
9656
1da177e4 9657 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8b1bb6dc
SJ
9658 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9659 GFP_KERNEL, &dma_addr);
1da177e4
LT
9660
9661 if (!ipr_cmd) {
9662 ipr_free_cmd_blks(ioa_cfg);
9663 return -ENOMEM;
9664 }
9665
1da177e4
LT
9666 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9667 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9668
9669 ioarcb = &ipr_cmd->ioarcb;
a32c055f
WB
9670 ipr_cmd->dma_addr = dma_addr;
9671 if (ioa_cfg->sis64)
9672 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9673 else
9674 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9675
1da177e4 9676 ioarcb->host_response_handle = cpu_to_be32(i << 2);
a32c055f
WB
9677 if (ioa_cfg->sis64) {
9678 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9679 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9680 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
96d21f00 9681 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
a32c055f
WB
9682 } else {
9683 ioarcb->write_ioadl_addr =
9684 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9685 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9686 ioarcb->ioasa_host_pci_addr =
96d21f00 9687 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
a32c055f 9688 }
1da177e4
LT
9689 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9690 ipr_cmd->cmd_index = i;
9691 ipr_cmd->ioa_cfg = ioa_cfg;
9692 ipr_cmd->sense_buffer_dma = dma_addr +
9693 offsetof(struct ipr_cmnd, sense_buffer);
9694
05a6538a 9695 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9696 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9697 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9698 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9699 hrrq_id++;
1da177e4
LT
9700 }
9701
9702 return 0;
9703}
9704
9705/**
9706 * ipr_alloc_mem - Allocate memory for an adapter
9707 * @ioa_cfg: ioa config struct
9708 *
9709 * Return value:
9710 * 0 on success / non-zero for error
9711 **/
6f039790 9712static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9713{
9714 struct pci_dev *pdev = ioa_cfg->pdev;
9715 int i, rc = -ENOMEM;
9716
9717 ENTER;
6396bb22
KC
9718 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9719 sizeof(struct ipr_resource_entry),
9720 GFP_KERNEL);
1da177e4
LT
9721
9722 if (!ioa_cfg->res_entries)
9723 goto out;
9724
3e7ebdfa 9725 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
1da177e4 9726 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
3e7ebdfa
WB
9727 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9728 }
1da177e4 9729
d73341bf
AB
9730 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9731 sizeof(struct ipr_misc_cbs),
9732 &ioa_cfg->vpd_cbs_dma,
9733 GFP_KERNEL);
1da177e4
LT
9734
9735 if (!ioa_cfg->vpd_cbs)
9736 goto out_free_res_entries;
9737
9738 if (ipr_alloc_cmd_blks(ioa_cfg))
9739 goto out_free_vpd_cbs;
9740
05a6538a 9741 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
d73341bf 9742 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
05a6538a 9743 sizeof(u32) * ioa_cfg->hrrq[i].size,
d73341bf
AB
9744 &ioa_cfg->hrrq[i].host_rrq_dma,
9745 GFP_KERNEL);
05a6538a 9746
9747 if (!ioa_cfg->hrrq[i].host_rrq) {
9748 while (--i > 0)
d73341bf 9749 dma_free_coherent(&pdev->dev,
05a6538a 9750 sizeof(u32) * ioa_cfg->hrrq[i].size,
9751 ioa_cfg->hrrq[i].host_rrq,
9752 ioa_cfg->hrrq[i].host_rrq_dma);
9753 goto out_ipr_free_cmd_blocks;
9754 }
9755 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9756 }
1da177e4 9757
d73341bf
AB
9758 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9759 ioa_cfg->cfg_table_size,
9760 &ioa_cfg->cfg_table_dma,
9761 GFP_KERNEL);
1da177e4 9762
3e7ebdfa 9763 if (!ioa_cfg->u.cfg_table)
1da177e4
LT
9764 goto out_free_host_rrq;
9765
afc3f83c 9766 for (i = 0; i < IPR_MAX_HCAMS; i++) {
d73341bf
AB
9767 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9768 sizeof(struct ipr_hostrcb),
9769 &ioa_cfg->hostrcb_dma[i],
9770 GFP_KERNEL);
1da177e4
LT
9771
9772 if (!ioa_cfg->hostrcb[i])
9773 goto out_free_hostrcb_dma;
9774
9775 ioa_cfg->hostrcb[i]->hostrcb_dma =
9776 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
49dc6a18 9777 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
1da177e4
LT
9778 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9779 }
9780
6396bb22
KC
9781 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9782 sizeof(struct ipr_trace_entry),
9783 GFP_KERNEL);
1da177e4
LT
9784
9785 if (!ioa_cfg->trace)
9786 goto out_free_hostrcb_dma;
9787
1da177e4
LT
9788 rc = 0;
9789out:
9790 LEAVE;
9791 return rc;
9792
9793out_free_hostrcb_dma:
9794 while (i-- > 0) {
d73341bf
AB
9795 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9796 ioa_cfg->hostrcb[i],
9797 ioa_cfg->hostrcb_dma[i]);
1da177e4 9798 }
d73341bf
AB
9799 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9800 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
1da177e4 9801out_free_host_rrq:
05a6538a 9802 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
d73341bf
AB
9803 dma_free_coherent(&pdev->dev,
9804 sizeof(u32) * ioa_cfg->hrrq[i].size,
9805 ioa_cfg->hrrq[i].host_rrq,
9806 ioa_cfg->hrrq[i].host_rrq_dma);
05a6538a 9807 }
1da177e4
LT
9808out_ipr_free_cmd_blocks:
9809 ipr_free_cmd_blks(ioa_cfg);
9810out_free_vpd_cbs:
d73341bf
AB
9811 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9812 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
1da177e4
LT
9813out_free_res_entries:
9814 kfree(ioa_cfg->res_entries);
9815 goto out;
9816}
9817
9818/**
9819 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9820 * @ioa_cfg: ioa config struct
9821 *
9822 * Return value:
9823 * none
9824 **/
6f039790 9825static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
1da177e4
LT
9826{
9827 int i;
9828
9829 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9830 ioa_cfg->bus_attr[i].bus = i;
9831 ioa_cfg->bus_attr[i].qas_enabled = 0;
9832 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9833 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9834 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9835 else
9836 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9837 }
9838}
9839
6270e593
BK
9840/**
9841 * ipr_init_regs - Initialize IOA registers
9842 * @ioa_cfg: ioa config struct
9843 *
9844 * Return value:
9845 * none
9846 **/
9847static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9848{
9849 const struct ipr_interrupt_offsets *p;
9850 struct ipr_interrupts *t;
9851 void __iomem *base;
9852
9853 p = &ioa_cfg->chip_cfg->regs;
9854 t = &ioa_cfg->regs;
9855 base = ioa_cfg->hdw_dma_regs;
9856
9857 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9858 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9859 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9860 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9861 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9862 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9863 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9864 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9865 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9866 t->ioarrin_reg = base + p->ioarrin_reg;
9867 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9868 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9869 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9870 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9871 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9872 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9873
9874 if (ioa_cfg->sis64) {
9875 t->init_feedback_reg = base + p->init_feedback_reg;
9876 t->dump_addr_reg = base + p->dump_addr_reg;
9877 t->dump_data_reg = base + p->dump_data_reg;
9878 t->endian_swap_reg = base + p->endian_swap_reg;
9879 }
9880}
9881
1da177e4
LT
9882/**
9883 * ipr_init_ioa_cfg - Initialize IOA config struct
9884 * @ioa_cfg: ioa config struct
9885 * @host: scsi host struct
9886 * @pdev: PCI dev struct
9887 *
9888 * Return value:
9889 * none
9890 **/
6f039790
GKH
9891static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9892 struct Scsi_Host *host, struct pci_dev *pdev)
1da177e4 9893{
6270e593 9894 int i;
1da177e4
LT
9895
9896 ioa_cfg->host = host;
9897 ioa_cfg->pdev = pdev;
9898 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 9899 ioa_cfg->doorbell = IPR_DOORBELL;
1da177e4
LT
9900 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9901 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
1da177e4
LT
9902 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9903 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9904 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9905 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9906
1da177e4
LT
9907 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9908 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
afc3f83c 9909 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
1da177e4
LT
9910 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9911 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
c4028958 9912 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
1da177e4 9913 init_waitqueue_head(&ioa_cfg->reset_wait_q);
95fecd90 9914 init_waitqueue_head(&ioa_cfg->msi_wait_q);
6270e593 9915 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
1da177e4
LT
9916 ioa_cfg->sdt_state = INACTIVE;
9917
9918 ipr_initialize_bus_attr(ioa_cfg);
3e7ebdfa 9919 ioa_cfg->max_devs_supported = ipr_max_devs;
1da177e4 9920
3e7ebdfa
WB
9921 if (ioa_cfg->sis64) {
9922 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9923 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9924 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9925 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
6270e593
BK
9926 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9927 + ((sizeof(struct ipr_config_table_entry64)
9928 * ioa_cfg->max_devs_supported)));
3e7ebdfa
WB
9929 } else {
9930 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9931 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9932 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9933 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
6270e593
BK
9934 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9935 + ((sizeof(struct ipr_config_table_entry)
9936 * ioa_cfg->max_devs_supported)));
3e7ebdfa 9937 }
6270e593 9938
f688f96d 9939 host->max_channel = IPR_VSET_BUS;
1da177e4
LT
9940 host->unique_id = host->host_no;
9941 host->max_cmd_len = IPR_MAX_CDB_LEN;
89aad428 9942 host->can_queue = ioa_cfg->max_cmds;
1da177e4
LT
9943 pci_set_drvdata(pdev, ioa_cfg);
9944
6270e593
BK
9945 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9946 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9947 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9948 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9949 if (i == 0)
9950 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9951 else
9952 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
dcbad00e 9953 }
1da177e4
LT
9954}
9955
9956/**
1be7bd82 9957 * ipr_get_chip_info - Find adapter chip information
1da177e4
LT
9958 * @dev_id: PCI device id struct
9959 *
9960 * Return value:
1be7bd82 9961 * ptr to chip information on success / NULL on failure
1da177e4 9962 **/
6f039790 9963static const struct ipr_chip_t *
1be7bd82 9964ipr_get_chip_info(const struct pci_device_id *dev_id)
1da177e4
LT
9965{
9966 int i;
9967
1da177e4
LT
9968 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9969 if (ipr_chip[i].vendor == dev_id->vendor &&
9970 ipr_chip[i].device == dev_id->device)
1be7bd82 9971 return &ipr_chip[i];
1da177e4
LT
9972 return NULL;
9973}
9974
6270e593
BK
9975/**
9976 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9977 * during probe time
9978 * @ioa_cfg: ioa config struct
9979 *
9980 * Return value:
9981 * None
9982 **/
9983static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9984{
9985 struct pci_dev *pdev = ioa_cfg->pdev;
9986
9987 if (pci_channel_offline(pdev)) {
9988 wait_event_timeout(ioa_cfg->eeh_wait_q,
9989 !pci_channel_offline(pdev),
9990 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9991 pci_restore_state(pdev);
9992 }
9993}
9994
05a6538a 9995static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9996{
9997 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9998
9999 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10000 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10001 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10002 ioa_cfg->vectors_info[vec_idx].
10003 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10004 }
10005}
10006
a299ee62
CH
10007static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10008 struct pci_dev *pdev)
05a6538a 10009{
10010 int i, rc;
10011
10012 for (i = 1; i < ioa_cfg->nvectors; i++) {
a299ee62 10013 rc = request_irq(pci_irq_vector(pdev, i),
05a6538a 10014 ipr_isr_mhrrq,
10015 0,
10016 ioa_cfg->vectors_info[i].desc,
10017 &ioa_cfg->hrrq[i]);
10018 if (rc) {
10019 while (--i >= 0)
a299ee62 10020 free_irq(pci_irq_vector(pdev, i),
05a6538a 10021 &ioa_cfg->hrrq[i]);
10022 return rc;
10023 }
10024 }
10025 return 0;
10026}
10027
95fecd90
WB
10028/**
10029 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10030 * @pdev: PCI device struct
10031 *
10032 * Description: Simply set the msi_received flag to 1 indicating that
10033 * Message Signaled Interrupts are supported.
10034 *
10035 * Return value:
10036 * 0 on success / non-zero on failure
10037 **/
6f039790 10038static irqreturn_t ipr_test_intr(int irq, void *devp)
95fecd90
WB
10039{
10040 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10041 unsigned long lock_flags = 0;
10042 irqreturn_t rc = IRQ_HANDLED;
10043
05a6538a 10044 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
95fecd90
WB
10045 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10046
10047 ioa_cfg->msi_received = 1;
10048 wake_up(&ioa_cfg->msi_wait_q);
10049
10050 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10051 return rc;
10052}
10053
10054/**
10055 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10056 * @pdev: PCI device struct
10057 *
a299ee62 10058 * Description: This routine sets up and initiates a test interrupt to determine
95fecd90
WB
10059 * if the interrupt is received via the ipr_test_intr() service routine.
10060 * If the tests fails, the driver will fall back to LSI.
10061 *
10062 * Return value:
10063 * 0 on success / non-zero on failure
10064 **/
6f039790 10065static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
95fecd90
WB
10066{
10067 int rc;
10068 volatile u32 int_reg;
10069 unsigned long lock_flags = 0;
a299ee62 10070 int irq = pci_irq_vector(pdev, 0);
95fecd90
WB
10071
10072 ENTER;
10073
10074 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10075 init_waitqueue_head(&ioa_cfg->msi_wait_q);
10076 ioa_cfg->msi_received = 0;
10077 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
214777ba 10078 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
95fecd90
WB
10079 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10080 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10081
a299ee62 10082 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
95fecd90 10083 if (rc) {
a299ee62 10084 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
95fecd90
WB
10085 return rc;
10086 } else if (ipr_debug)
a299ee62 10087 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
95fecd90 10088
214777ba 10089 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
95fecd90
WB
10090 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10091 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
56d6aa33 10092 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
95fecd90
WB
10093 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10094
95fecd90
WB
10095 if (!ioa_cfg->msi_received) {
10096 /* MSI test failed */
10097 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
10098 rc = -EOPNOTSUPP;
10099 } else if (ipr_debug)
10100 dev_info(&pdev->dev, "MSI test succeeded.\n");
10101
10102 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10103
a299ee62 10104 free_irq(irq, ioa_cfg);
95fecd90
WB
10105
10106 LEAVE;
10107
10108 return rc;
10109}
10110
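/*
 * First-stage probe: map the adapter registers, set the DMA mask,
 * allocate MSI-X/MSI (or legacy INTx) vectors, verify MSI delivery
 * with a test interrupt, allocate command blocks, and register the
 * interrupt handlers. Second-stage initialization is driven from
 * ipr_probe().
 */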
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags, driver_lock_flags;
	unsigned int irq_flag;

	ENTER;

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	rc = pci_enable_device(pdev);

	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "Cannot enable adapter\n");
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			goto out_release_regions;
		}
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_regs(ioa_cfg);

	if (ioa_cfg->sis64) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
			rc = dma_set_mask_and_coherent(&pdev->dev,
						       DMA_BIT_MASK(32));
		}
	} else
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Issue MMIO read to ensure card is not in EEH */
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
	ipr_wait_for_pci_err_recovery(ioa_cfg);

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}
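	/*
	 * Allocate interrupt vectors: prefer MSI-X/MSI when the chip
	 * supports them, falling back to a single legacy INTx vector.
	 * pci_alloc_irq_vectors() returns the number of vectors granted.
	 */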
	irq_flag = PCI_IRQ_LEGACY;
	if (ioa_cfg->ipr_chip->has_msi)
		irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
	rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
	if (rc < 0) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		goto cleanup_nomem;
	}
	ioa_cfg->nvectors = rc;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		ioa_cfg->clear_isr = 1;
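	/*
	 * Enable bus mastering, retrying once if an EEH event is in
	 * progress; give up if the channel is still offline afterwards.
	 */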
	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		pci_set_master(pdev);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}
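	/*
	 * If MSI/MSI-X was granted, confirm the adapter can actually
	 * deliver it with a test interrupt: on -EOPNOTSUPP fall back to
	 * a single legacy vector, on any other error bail out.
	 */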
	if (pdev->msi_enabled || pdev->msix_enabled) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		switch (rc) {
		case 0:
			dev_info(&pdev->dev,
				 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
				 pdev->msix_enabled ? "-X" : "");
			break;
		case -EOPNOTSUPP:
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			pci_free_irq_vectors(pdev);

			ioa_cfg->nvectors = 1;
			ioa_cfg->clear_isr = 1;
			break;
		default:
			goto out_msi_disable;
		}
	}
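	/*
	 * Use the smallest of the vectors granted, the online CPU count,
	 * and the driver maximum as the number of HRRQs.
	 */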
	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				 (unsigned int)num_online_cpus(),
				 (unsigned int)IPR_MAX_HRRQ_NUM);

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nolog;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
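	/*
	 * Register the interrupt handlers: one handler per MSI/MSI-X
	 * vector, or a single shared INTx handler otherwise.
	 */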
	if (pdev->msi_enabled || pdev->msix_enabled) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
				 ioa_cfg->vectors_info[0].desc,
				 &ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
				 IRQF_SHARED,
				 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}
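	/*
	 * Adapters that need a PCI warm reset get an ordered workqueue
	 * of their own (WQ_MEM_RECLAIM) so slot resets can make forward
	 * progress under memory pressure.
	 */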
	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;

		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
								WQ_MEM_RECLAIM, host->host_no);

		if (!ioa_cfg->reset_work_q) {
			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
			rc = -ENOMEM;
			goto out_free_irq;
		}
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	LEAVE;
out:
	return rc;

out_free_irq:
	ipr_free_irqs(ioa_cfg);
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	ipr_wait_for_pci_err_recovery(ioa_cfg);
	pci_free_irq_vectors(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
	goto out;
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
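/*
 * Teardown helper shared by ipr_remove() and the error paths in
 * ipr_probe(): waits out any reset in progress, marks the HRRQs as
 * being removed, brings the adapter down, and frees its resources.
 */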
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}
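	/*
	 * Flag every HRRQ as being removed so no new commands are
	 * accepted, and make that visible before starting the bringdown.
	 */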
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	if (ioa_cfg->reset_work_q)
		flush_workqueue(ioa_cfg->reset_work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_ioa_async_err_log);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}
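/*
 * Second-stage probe: run part-2 initialization, register the SCSI
 * host and its sysfs attributes (each error path unwinds whatever was
 * registered before it), enable scanning, and set up irq_poll for the
 * secondary HRRQs on SIS-64 adapters.
 */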
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_ioa_async_err_log);

	if (rc) {
		ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
				     &ipr_dump_attr);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_ioa_async_err_log);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	ioa_cfg->scan_enabled = 1;
	schedule_work(&ioa_cfg->work_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
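	/*
	 * With multiple HRRQs on SIS-64 hardware, completions on the
	 * secondary queues are processed through irq_poll, budgeted by
	 * iopoll_weight.
	 */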
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
				      ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}

	scsi_scan_host(ioa_cfg->host);

	return 0;
}
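/*
 * On a fast reboot of a SIS-64 adapter, the shutdown path below takes
 * the quiesce path rather than a normal cache-flush shutdown, then
 * frees its IRQs and disables the PCI device once the reset completes.
 */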
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
		shutdown_type = IPR_SHUTDOWN_QUIESCE;

	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
		ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}
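/* PCI IDs of the adapters this driver claims. */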
static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
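/* PCI/EEH error recovery callbacks. */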
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
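/*
 * Completion handler for the shutdown prepare command issued from
 * ipr_halt(): return the command block to the free queue.
 */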
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
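/*
 * Reboot notifier: on restart, halt, or power-off, issue a shutdown
 * prepare command to every adapter that is still accepting commands.
 */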
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	system event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:	unused
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};
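/*
 * Module entry and exit: register/unregister the reboot notifier and
 * the PCI driver.
 */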
/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);